-rw-r--r--  .mailmap | 3
-rw-r--r--  Documentation/admin-guide/kernel-parameters.txt | 6
-rw-r--r--  Documentation/devicetree/bindings/display/amlogic,meson-dw-hdmi.txt | 111
-rw-r--r--  Documentation/devicetree/bindings/display/mediatek/mediatek,disp.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/display/mediatek/mediatek,dsi.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/display/panel/ampire,am-480272h3tmqw-t01h.txt | 26
-rw-r--r--  Documentation/devicetree/bindings/display/panel/samsung,s6e3ha2.txt | 28
-rw-r--r--  Documentation/devicetree/bindings/display/panel/sitronix,st7789v.txt | 37
-rw-r--r--  Documentation/devicetree/bindings/display/panel/winstar,wf35ltiacd.txt | 48
-rw-r--r--  Documentation/devicetree/bindings/display/rockchip/dw_mipi_dsi_rockchip.txt | 5
-rw-r--r--  Documentation/devicetree/bindings/rng/omap_rng.txt | 3
-rw-r--r--  Documentation/devicetree/bindings/vendor-prefixes.txt | 1
-rw-r--r--  Documentation/filesystems/Locking | 3
-rw-r--r--  Documentation/filesystems/porting | 6
-rw-r--r--  Documentation/filesystems/vfs.txt | 3
-rw-r--r--  Documentation/gpu/bridge/dw-hdmi.rst | 15
-rw-r--r--  Documentation/gpu/drm-internals.rst | 50
-rw-r--r--  Documentation/gpu/drm-uapi.rst | 24
-rw-r--r--  Documentation/gpu/index.rst | 2
-rw-r--r--  Documentation/gpu/meson.rst | 61
-rw-r--r--  Documentation/gpu/todo.rst | 31
-rw-r--r--  Documentation/media/uapi/v4l/subdev-formats.rst | 960
-rw-r--r--  Documentation/pinctrl.txt | 8
-rw-r--r--  Documentation/process/stable-kernel-rules.rst | 2
-rw-r--r--  Documentation/virtual/kvm/api.txt | 63
-rw-r--r--  Documentation/virtual/kvm/devices/arm-vgic.txt | 6
-rw-r--r--  MAINTAINERS | 32
-rw-r--r--  Makefile | 16
-rw-r--r--  arch/alpha/kernel/osf_sys.c | 2
-rw-r--r--  arch/arc/boot/dts/skeleton.dtsi | 1
-rw-r--r--  arch/arc/boot/dts/skeleton_hs.dtsi | 1
-rw-r--r--  arch/arc/boot/dts/skeleton_hs_idu.dtsi | 21
-rw-r--r--  arch/arc/boot/dts/vdk_axs10x_mb.dtsi | 20
-rw-r--r--  arch/arc/include/asm/kprobes.h | 4
-rw-r--r--  arch/arc/kernel/entry-arcv2.S | 12
-rw-r--r--  arch/arc/kernel/setup.c | 16
-rw-r--r--  arch/arc/mm/cache.c | 3
-rw-r--r--  arch/arm/boot/dts/am335x-baltos.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/am335x-evmsk.dts | 1
-rw-r--r--  arch/arm/boot/dts/dra7.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/logicpd-torpedo-som.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/sun8i-a33.dtsi | 16
-rw-r--r--  arch/arm/kvm/arm.c | 3
-rw-r--r--  arch/arm/kvm/mmu.c | 23
-rw-r--r--  arch/arm/mach-omap2/common.h | 1
-rw-r--r--  arch/arm/mach-omap2/omap-hotplug.c | 2
-rw-r--r--  arch/arm/mach-omap2/omap-mpuss-lowpower.c | 22
-rw-r--r--  arch/arm/mach-omap2/omap-smc.S | 1
-rw-r--r--  arch/arm/mach-omap2/omap-smp.c | 90
-rw-r--r--  arch/arm/mach-omap2/omap_device.c | 8
-rw-r--r--  arch/arm/mach-orion5x/Kconfig | 1
-rw-r--r--  arch/arm/mm/dma-mapping.c | 20
-rw-r--r--  arch/arm/mm/nommu.c | 5
-rw-r--r--  arch/arm/plat-orion/common.c | 5
-rw-r--r--  arch/arm/probes/kprobes/core.c | 49
-rw-r--r--  arch/arm/probes/kprobes/test-core.c | 11
-rw-r--r--  arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi | 2
-rw-r--r--  arch/arm64/include/asm/current.h | 2
-rw-r--r--  arch/arm64/kernel/smp.c | 2
-rw-r--r--  arch/arm64/kernel/vdso/.gitignore | 1
-rw-r--r--  arch/arm64/mm/fault.c | 42
-rw-r--r--  arch/arm64/mm/hugetlbpage.c | 14
-rw-r--r--  arch/c6x/kernel/ptrace.c | 41
-rw-r--r--  arch/h8300/kernel/ptrace.c | 8
-rw-r--r--  arch/ia64/include/asm/asm-prototypes.h | 29
-rw-r--r--  arch/ia64/lib/Makefile | 16
-rw-r--r--  arch/m68k/configs/amiga_defconfig | 14
-rw-r--r--  arch/m68k/configs/apollo_defconfig | 14
-rw-r--r--  arch/m68k/configs/atari_defconfig | 14
-rw-r--r--  arch/m68k/configs/bvme6000_defconfig | 14
-rw-r--r--  arch/m68k/configs/hp300_defconfig | 14
-rw-r--r--  arch/m68k/configs/mac_defconfig | 14
-rw-r--r--  arch/m68k/configs/multi_defconfig | 14
-rw-r--r--  arch/m68k/configs/mvme147_defconfig | 14
-rw-r--r--  arch/m68k/configs/mvme16x_defconfig | 14
-rw-r--r--  arch/m68k/configs/q40_defconfig | 14
-rw-r--r--  arch/m68k/configs/sun3_defconfig | 14
-rw-r--r--  arch/m68k/configs/sun3x_defconfig | 14
-rw-r--r--  arch/m68k/include/asm/bitops.h | 2
-rw-r--r--  arch/m68k/include/asm/unistd.h | 2
-rw-r--r--  arch/m68k/include/uapi/asm/unistd.h | 1
-rw-r--r--  arch/m68k/kernel/syscalltable.S | 1
-rw-r--r--  arch/metag/include/asm/uaccess.h | 15
-rw-r--r--  arch/metag/kernel/ptrace.c | 19
-rw-r--r--  arch/metag/lib/usercopy.c | 312
-rw-r--r--  arch/mips/Kconfig | 2
-rw-r--r--  arch/mips/include/asm/fpu.h | 1
-rw-r--r--  arch/mips/include/asm/irq.h | 15
-rw-r--r--  arch/mips/include/asm/spinlock.h | 8
-rw-r--r--  arch/mips/include/uapi/asm/unistd.h | 15
-rw-r--r--  arch/mips/kernel/asm-offsets.c | 1
-rw-r--r--  arch/mips/kernel/cps-vec.S | 2
-rw-r--r--  arch/mips/kernel/cpu-probe.c | 2
-rw-r--r--  arch/mips/kernel/genex.S | 12
-rw-r--r--  arch/mips/kernel/process.c | 56
-rw-r--r--  arch/mips/kernel/ptrace.c | 3
-rw-r--r--  arch/mips/kernel/scall32-o32.S | 1
-rw-r--r--  arch/mips/kernel/scall64-64.S | 1
-rw-r--r--  arch/mips/kernel/scall64-n32.S | 1
-rw-r--r--  arch/mips/kernel/scall64-o32.S | 1
-rw-r--r--  arch/mips/kernel/traps.c | 17
-rw-r--r--  arch/mips/lantiq/xway/sysctrl.c | 2
-rw-r--r--  arch/mips/mm/c-r4k.c | 2
-rw-r--r--  arch/mips/mm/tlbex.c | 25
-rw-r--r--  arch/mips/ralink/rt3883.c | 4
-rw-r--r--  arch/nios2/kernel/prom.c | 7
-rw-r--r--  arch/nios2/kernel/setup.c | 3
-rw-r--r--  arch/parisc/include/asm/uaccess.h | 59
-rw-r--r--  arch/parisc/kernel/parisc_ksyms.c | 10
-rw-r--r--  arch/parisc/kernel/process.c | 2
-rw-r--r--  arch/parisc/lib/Makefile | 2
-rw-r--r--  arch/parisc/lib/fixup.S | 98
-rw-r--r--  arch/parisc/lib/lusercopy.S | 319
-rw-r--r--  arch/parisc/lib/memcpy.c | 461
-rw-r--r--  arch/parisc/mm/fault.c | 17
-rw-r--r--  arch/powerpc/crypto/crc32c-vpmsum_glue.c | 3
-rw-r--r--  arch/powerpc/kernel/align.c | 27
-rw-r--r--  arch/powerpc/kernel/misc_64.S | 4
-rw-r--r--  arch/powerpc/kernel/setup_64.c | 9
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu_hv.c | 4
-rw-r--r--  arch/powerpc/mm/hash_native_64.c | 7
-rw-r--r--  arch/s390/boot/compressed/misc.c | 35
-rw-r--r--  arch/s390/include/asm/sections.h | 1
-rw-r--r--  arch/s390/include/asm/uaccess.h | 2
-rw-r--r--  arch/s390/kernel/smp.c | 5
-rw-r--r--  arch/s390/kernel/vmlinux.lds.S | 2
-rw-r--r--  arch/s390/kvm/gaccess.c | 7
-rw-r--r--  arch/sparc/include/asm/page_64.h | 3
-rw-r--r--  arch/sparc/include/asm/pgtable_64.h | 15
-rw-r--r--  arch/sparc/include/asm/processor_32.h | 6
-rw-r--r--  arch/sparc/include/asm/processor_64.h | 4
-rw-r--r--  arch/sparc/kernel/head_64.S | 4
-rw-r--r--  arch/sparc/kernel/misctrap.S | 1
-rw-r--r--  arch/sparc/kernel/ptrace_64.c | 2
-rw-r--r--  arch/sparc/kernel/rtrap_64.S | 1
-rw-r--r--  arch/sparc/kernel/spiterrs.S | 1
-rw-r--r--  arch/sparc/kernel/sun4v_tlb_miss.S | 1
-rw-r--r--  arch/sparc/kernel/urtt_fill.S | 1
-rw-r--r--  arch/sparc/kernel/winfixup.S | 2
-rw-r--r--  arch/sparc/lib/NG2memcpy.S | 4
-rw-r--r--  arch/sparc/lib/NG4memcpy.S | 1
-rw-r--r--  arch/sparc/lib/NG4memset.S | 1
-rw-r--r--  arch/sparc/lib/NGmemcpy.S | 1
-rw-r--r--  arch/sparc/mm/hugetlbpage.c | 9
-rw-r--r--  arch/sparc/mm/init_64.c | 6
-rw-r--r--  arch/sparc/mm/srmmu.c | 1
-rw-r--r--  arch/sparc/mm/tlb.c | 6
-rw-r--r--  arch/sparc/mm/tsb.c | 4
-rw-r--r--  arch/x86/Makefile | 35
-rw-r--r--  arch/x86/Makefile_32.cpu | 18
-rw-r--r--  arch/x86/boot/compressed/error.c | 1
-rw-r--r--  arch/x86/entry/vdso/vdso32-setup.c | 11
-rw-r--r--  arch/x86/events/core.c | 9
-rw-r--r--  arch/x86/events/intel/lbr.c | 3
-rw-r--r--  arch/x86/include/asm/elf.h | 2
-rw-r--r--  arch/x86/include/asm/kvm_page_track.h | 1
-rw-r--r--  arch/x86/include/asm/pmem.h | 42
-rw-r--r--  arch/x86/include/asm/timer.h | 2
-rw-r--r--  arch/x86/include/asm/uv/uv_hub.h | 8
-rw-r--r--  arch/x86/kernel/apic/x2apic_uv_x.c | 3
-rw-r--r--  arch/x86/kernel/cpu/intel_rdt_schemata.c | 2
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce.c | 7
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce_amd.c | 2
-rw-r--r--  arch/x86/kernel/ftrace.c | 6
-rw-r--r--  arch/x86/kernel/signal.c | 2
-rw-r--r--  arch/x86/kernel/signal_compat.c | 4
-rw-r--r--  arch/x86/kernel/traps.c | 4
-rw-r--r--  arch/x86/kernel/tsc.c | 4
-rw-r--r--  arch/x86/kvm/i8259.c | 3
-rw-r--r--  arch/x86/kvm/ioapic.c | 3
-rw-r--r--  arch/x86/kvm/page_track.c | 8
-rw-r--r--  arch/x86/kvm/svm.c | 3
-rw-r--r--  arch/x86/kvm/vmx.c | 59
-rw-r--r--  arch/x86/kvm/x86.c | 7
-rw-r--r--  arch/x86/lib/memcpy_64.S | 2
-rw-r--r--  arch/x86/mm/init.c | 41
-rw-r--r--  arch/x86/mm/kaslr.c | 4
-rw-r--r--  arch/x86/platform/efi/quirks.c | 4
-rw-r--r--  arch/x86/purgatory/Makefile | 1
-rw-r--r--  arch/xtensa/include/asm/page.h | 13
-rw-r--r--  arch/xtensa/include/uapi/asm/unistd.h | 5
-rw-r--r--  arch/xtensa/kernel/traps.c | 6
-rw-r--r--  block/blk-mq-sched.c | 181
-rw-r--r--  block/blk-mq-sched.h | 25
-rw-r--r--  block/blk-mq.c | 92
-rw-r--r--  block/blk-mq.h | 2
-rw-r--r--  block/blk-sysfs.c | 2
-rw-r--r--  block/elevator.c | 114
-rw-r--r--  crypto/lrw.c | 7
-rw-r--r--  crypto/xts.c | 7
-rw-r--r--  drivers/acpi/Makefile | 1
-rw-r--r--  drivers/acpi/acpi_platform.c | 8
-rw-r--r--  drivers/acpi/acpica/utresrc.c | 17
-rw-r--r--  drivers/acpi/apei/ghes.c | 1
-rw-r--r--  drivers/acpi/glue.c | 12
-rw-r--r--  drivers/acpi/ioapic.c | 6
-rw-r--r--  drivers/acpi/nfit/core.c | 6
-rw-r--r--  drivers/acpi/scan.c | 19
-rw-r--r--  drivers/ata/pata_atiixp.c | 5
-rw-r--r--  drivers/ata/sata_via.c | 18
-rw-r--r--  drivers/block/nbd.c | 136
-rw-r--r--  drivers/block/zram/zram_drv.c | 6
-rw-r--r--  drivers/char/mem.c | 82
-rw-r--r--  drivers/char/virtio_console.c | 6
-rw-r--r--  drivers/clocksource/clkevt-probe.c | 2
-rw-r--r--  drivers/cpufreq/cpufreq.c | 56
-rw-r--r--  drivers/cpuidle/cpuidle-powernv.c | 18
-rw-r--r--  drivers/crypto/caam/caampkc.c | 2
-rw-r--r--  drivers/crypto/caam/ctrl.c | 66
-rw-r--r--  drivers/crypto/caam/intern.h | 1
-rw-r--r--  drivers/crypto/ccp/ccp-dev-v5.c | 1
-rw-r--r--  drivers/crypto/ccp/ccp-dev.h | 5
-rw-r--r--  drivers/crypto/ccp/ccp-dmaengine.c | 41
-rw-r--r--  drivers/dax/Kconfig | 1
-rw-r--r--  drivers/dax/dax.c | 13
-rw-r--r--  drivers/dma-buf/dma-buf.c | 53
-rw-r--r--  drivers/dma/bcm2835-dma.c | 5
-rw-r--r--  drivers/dma/dmaengine.c | 2
-rw-r--r--  drivers/edac/Kconfig | 10
-rw-r--r--  drivers/edac/Makefile | 1
-rw-r--r--  drivers/edac/i5000_edac.c | 2
-rw-r--r--  drivers/edac/i5400_edac.c | 5
-rw-r--r--  drivers/edac/pnd2_edac.c | 1546
-rw-r--r--  drivers/edac/pnd2_edac.h | 301
-rw-r--r--  drivers/edac/xgene_edac.c | 2
-rw-r--r--  drivers/firmware/efi/efi.c | 1
-rw-r--r--  drivers/firmware/efi/esrt.c | 2
-rw-r--r--  drivers/firmware/efi/libstub/gop.c | 6
-rw-r--r--  drivers/gpio/gpiolib-acpi.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h | 31
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c | 29
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 21
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 93
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 19
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h | 12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 41
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 268
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c | 17
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | 32
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v11_0.c | 32
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v6_0.c | 32
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v8_0.c | 32
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_virtual.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 82
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 274
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c | 35
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 92
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 92
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 96
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c | 35
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c | 137
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/psp_v3_1.c | 14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/soc15.c | 27
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c | 19
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c | 18
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | 17
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c | 21
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vce_v2_0.c | 18
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vce_v3_0.c | 18
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vce_v4_0.c | 20
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vid.h | 82
-rw-r--r--  drivers/gpu/drm/amd/powerplay/amd_powerplay.c | 12
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega10_pptable.h | 50
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c | 324
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.h | 28
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/smu9_driver_if.h | 57
-rw-r--r--  drivers/gpu/drm/arm/hdlcd_drv.c | 22
-rw-r--r--  drivers/gpu/drm/arm/malidp_crtc.c | 341
-rw-r--r--  drivers/gpu/drm/arm/malidp_drv.c | 335
-rw-r--r--  drivers/gpu/drm/arm/malidp_drv.h | 13
-rw-r--r--  drivers/gpu/drm/arm/malidp_hw.c | 213
-rw-r--r--  drivers/gpu/drm/arm/malidp_hw.h | 81
-rw-r--r--  drivers/gpu/drm/arm/malidp_planes.c | 108
-rw-r--r--  drivers/gpu/drm/arm/malidp_regs.h | 72
-rw-r--r--  drivers/gpu/drm/armada/armada_gem.c | 8
-rw-r--r--  drivers/gpu/drm/ast/ast_mode.c | 3
-rw-r--r--  drivers/gpu/drm/ast/ast_ttm.c | 1
-rw-r--r--  drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c | 72
-rw-r--r--  drivers/gpu/drm/bochs/bochs_mm.c | 1
-rw-r--r--  drivers/gpu/drm/bridge/adv7511/adv7533.c | 12
-rw-r--r--  drivers/gpu/drm/bridge/analogix/analogix_dp_core.c | 6
-rw-r--r--  drivers/gpu/drm/bridge/dumb-vga-dac.c | 15
-rw-r--r--  drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c | 9
-rw-r--r--  drivers/gpu/drm/bridge/nxp-ptn3460.c | 16
-rw-r--r--  drivers/gpu/drm/bridge/parade-ps8622.c | 16
-rw-r--r--  drivers/gpu/drm/bridge/synopsys/dw-hdmi.c | 472
-rw-r--r--  drivers/gpu/drm/bridge/tc358767.c | 27
-rw-r--r--  drivers/gpu/drm/bridge/ti-tfp410.c | 15
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_mode.c | 3
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_ttm.c | 1
-rw-r--r--  drivers/gpu/drm/drm_atomic.c | 162
-rw-r--r--  drivers/gpu/drm/drm_atomic_helper.c | 119
-rw-r--r--  drivers/gpu/drm/drm_color_mgmt.c | 51
-rw-r--r--  drivers/gpu/drm/drm_crtc.c | 2
-rw-r--r--  drivers/gpu/drm/drm_crtc_internal.h | 1
-rw-r--r--  drivers/gpu/drm/drm_fb_helper.c | 201
-rw-r--r--  drivers/gpu/drm/drm_framebuffer.c | 137
-rw-r--r--  drivers/gpu/drm/drm_ioc32.c | 76
-rw-r--r--  drivers/gpu/drm/drm_ioctl.c | 53
-rw-r--r--  drivers/gpu/drm/drm_irq.c | 2
-rw-r--r--  drivers/gpu/drm/drm_modeset_lock.c | 102
-rw-r--r--  drivers/gpu/drm/drm_of.c | 52
-rw-r--r--  drivers/gpu/drm/drm_plane.c | 49
-rw-r--r--  drivers/gpu/drm/drm_prime.c | 8
-rw-r--r--  drivers/gpu/drm/drm_probe_helper.c | 102
-rw-r--r--  drivers/gpu/drm/drm_property.c | 72
-rw-r--r--  drivers/gpu/drm/drm_sysfs.c | 70
-rw-r--r--  drivers/gpu/drm/etnaviv/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_drv.c | 6
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gem.h | 4
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c | 65
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gpu.c | 106
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gpu.h | 4
-rw-r--r--  drivers/gpu/drm/exynos/exynos_dp.c | 35
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_dpi.c | 16
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_dsi.c | 13
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_mic.c | 25
-rw-r--r--  drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c | 44
-rw-r--r--  drivers/gpu/drm/gma500/gma_display.c | 3
-rw-r--r--  drivers/gpu/drm/gma500/gma_display.h | 3
-rw-r--r--  drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c | 27
-rw-r--r--  drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c | 30
-rw-r--r--  drivers/gpu/drm/i915/gvt/cfg_space.c | 3
-rw-r--r--  drivers/gpu/drm/i915/gvt/cmd_parser.c | 50
-rw-r--r--  drivers/gpu/drm/i915/gvt/display.c | 51
-rw-r--r--  drivers/gpu/drm/i915/gvt/edid.c | 3
-rw-r--r--  drivers/gpu/drm/i915/gvt/execlist.c | 20
-rw-r--r--  drivers/gpu/drm/i915/gvt/firmware.c | 9
-rw-r--r--  drivers/gpu/drm/i915/gvt/gtt.c | 16
-rw-r--r--  drivers/gpu/drm/i915/gvt/gvt.c | 21
-rw-r--r--  drivers/gpu/drm/i915/gvt/gvt.h | 18
-rw-r--r--  drivers/gpu/drm/i915/gvt/handlers.c | 437
-rw-r--r--  drivers/gpu/drm/i915/gvt/interrupt.c | 5
-rw-r--r--  drivers/gpu/drm/i915/gvt/kvmgt.c | 51
-rw-r--r--  drivers/gpu/drm/i915/gvt/mmio.h | 19
-rw-r--r--  drivers/gpu/drm/i915/gvt/render.c | 31
-rw-r--r--  drivers/gpu/drm/i915/gvt/sched_policy.c | 242
-rw-r--r--  drivers/gpu/drm/i915/gvt/sched_policy.h | 2
-rw-r--r--  drivers/gpu/drm/i915/gvt/scheduler.c | 15
-rw-r--r--  drivers/gpu/drm/i915/gvt/scheduler.h | 1
-rw-r--r--  drivers/gpu/drm/i915/gvt/vgpu.c | 128
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 50
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 58
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 98
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 73
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_clflush.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_context.c | 18
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_dmabuf.c | 8
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_evict.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_request.c | 36
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_shrinker.c | 26
-rw-r--r--  drivers/gpu/drm/i915/i915_guc_submission.c | 828
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 8
-rw-r--r--  drivers/gpu/drm/i915/i915_pci.c | 5
-rw-r--r--  drivers/gpu/drm/i915/i915_perf.c | 11
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 7
-rw-r--r--  drivers/gpu/drm/i915/i915_utils.h | 18
-rw-r--r--  drivers/gpu/drm/i915/intel_breadcrumbs.c | 26
-rw-r--r--  drivers/gpu/drm/i915/intel_cdclk.c | 29
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c | 25
-rw-r--r--  drivers/gpu/drm/i915/intel_csr.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_ddi.c | 100
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 545
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 32
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 35
-rw-r--r--  drivers/gpu/drm/i915/intel_engine_cs.c | 33
-rw-r--r--  drivers/gpu/drm/i915/intel_guc_fwif.h | 71
-rw-r--r--  drivers/gpu/drm/i915/intel_guc_loader.c | 49
-rw-r--r--  drivers/gpu/drm/i915/intel_guc_log.c | 386
-rw-r--r--  drivers/gpu/drm/i915/intel_gvt.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c | 65
-rw-r--r--  drivers/gpu/drm/i915/intel_hotplug.c | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_huc.c | 18
-rw-r--r--  drivers/gpu/drm/i915/intel_lpe_audio.c | 10
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.c | 131
-rw-r--r--  drivers/gpu/drm/i915/intel_opregion.c | 63
-rw-r--r--  drivers/gpu/drm/i915/intel_pipe_crc.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 88
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 31
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h | 65
-rw-r--r--  drivers/gpu/drm/i915/intel_runtime_pm.c | 12
-rw-r--r--  drivers/gpu/drm/i915/intel_sprite.c | 286
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c | 21
-rw-r--r--  drivers/gpu/drm/i915/intel_uc.c | 342
-rw-r--r--  drivers/gpu/drm/i915/intel_uc.h | 84
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.c | 150
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem_request.c | 6
-rw-r--r--  drivers/gpu/drm/i915/selftests/intel_hangcheck.c | 1
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_dmabuf.c | 8
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_drm.c | 45
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_engine.c | 1
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_request.c | 2
-rw-r--r--  drivers/gpu/drm/i915/selftests/scatterlist.c | 11
-rw-r--r--  drivers/gpu/drm/imx/Kconfig | 7
-rw-r--r--  drivers/gpu/drm/imx/Makefile | 3
-rw-r--r--  drivers/gpu/drm/imx/imx-drm-core.c | 18
-rw-r--r--  drivers/gpu/drm/imx/imx-drm.h | 2
-rw-r--r--  drivers/gpu/drm/imx/imx-ldb.c | 27
-rw-r--r--  drivers/gpu/drm/imx/ipuv3-crtc.c | 8
-rw-r--r--  drivers/gpu/drm/imx/parallel-display.c | 36
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_disp_ovl.c | 64
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_disp_rdma.c | 39
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_dpi.c | 12
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_crtc.c | 75
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_ddp.c | 138
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_ddp.h | 2
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c | 69
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h | 2
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_drv.c | 54
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_drv.h | 9
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_dsi.c | 595
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_hdmi.c | 26
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_mipi_tx.c | 38
-rw-r--r--  drivers/gpu/drm/meson/Kconfig | 6
-rw-r--r--  drivers/gpu/drm/meson/Makefile | 1
-rw-r--r--  drivers/gpu/drm/meson/meson_canvas.c | 4
-rw-r--r--  drivers/gpu/drm/meson/meson_crtc.c | 15
-rw-r--r--  drivers/gpu/drm/meson/meson_drv.c | 118
-rw-r--r--  drivers/gpu/drm/meson/meson_drv.h | 3
-rw-r--r--  drivers/gpu/drm/meson/meson_dw_hdmi.c | 919
-rw-r--r--  drivers/gpu/drm/meson/meson_dw_hdmi.h | 146
-rw-r--r--  drivers/gpu/drm/meson/meson_registers.h | 1
-rw-r--r--  drivers/gpu/drm/meson/meson_vclk.c | 632
-rw-r--r--  drivers/gpu/drm/meson/meson_vclk.h | 6
-rw-r--r--  drivers/gpu/drm/meson/meson_venc.c | 1254
-rw-r--r--  drivers/gpu/drm/meson/meson_venc.h | 7
-rw-r--r--  drivers/gpu/drm/meson/meson_venc_cvbs.c | 30
-rw-r--r--  drivers/gpu/drm/meson/meson_viu.c | 6
-rw-r--r--  drivers/gpu/drm/meson/meson_vpp.c | 8
-rw-r--r--  drivers/gpu/drm/meson/meson_vpp.h | 2
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_mode.c | 3
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_ttm.c | 1
-rw-r--r--  drivers/gpu/drm/msm/Makefile | 1
-rw-r--r--  drivers/gpu/drm/msm/adreno/a3xx_gpu.c | 2
-rw-r--r--  drivers/gpu/drm/msm/adreno/a4xx_gpu.c | 4
-rw-r--r--  drivers/gpu/drm/msm/adreno/a5xx_gpu.c | 28
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_device.c | 126
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_gpu.c | 70
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_gpu.h | 2
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi_host.c | 2
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi_manager.c | 2
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi_audio.c | 7
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c | 12
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c | 30
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c | 81
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h | 8
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c | 30
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c | 466
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c | 192
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h | 21
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c | 66
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c | 123
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h | 53
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_mixer.c | 172
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_mixer.h | 47
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.c | 2
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.h | 4
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c | 340
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp_kms.h | 6
-rw-r--r--  drivers/gpu/drm/msm/msm_debugfs.c | 2
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.c | 13
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.h | 3
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.c | 6
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.h | 2
-rw-r--r--  drivers/gpu/drm/msm/msm_gem_submit.c | 39
-rw-r--r--  drivers/gpu/drm/msm/msm_gem_vma.c | 35
-rw-r--r--  drivers/gpu/drm/msm/msm_gpu.c | 186
-rw-r--r--  drivers/gpu/drm/msm/msm_gpu.h | 18
-rw-r--r--  drivers/gpu/drm/msm/msm_iommu.c | 69
-rw-r--r--  drivers/gpu/drm/msm/msm_rd.c | 4
-rw-r--r--  drivers/gpu/drm/mxsfb/mxsfb_out.c | 40
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/crtc.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_display.c | 13
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/dpi.c | 2
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/dsi.c | 3
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/dss-of.c | 102
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/dss.c | 61
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/hdmi4.c | 3
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/hdmi5.c | 3
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/omapdss.h | 11
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/sdi.c | 2
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/venc.c | 3
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c | 8
-rw-r--r--  drivers/gpu/drm/panel/Kconfig | 13
-rw-r--r--  drivers/gpu/drm/panel/Makefile | 2
-rw-r--r--  drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c | 739
-rw-r--r--  drivers/gpu/drm/panel/panel-simple.c | 56
-rw-r--r--  drivers/gpu/drm/panel/panel-sitronix-st7789v.c | 449
-rw-r--r--  drivers/gpu/drm/qxl/qxl_ttm.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/r100.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cs.c | 10
-rw-r--r--  drivers/gpu/drm/radeon/radeon_display.c | 9
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gem.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c | 5
-rw-r--r--  drivers/gpu/drm/radeon/radeon_prime.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ttm.c | 5
-rw-r--r--  drivers/gpu/drm/rockchip/analogix_dp-rockchip.c | 29
-rw-r--r--  drivers/gpu/drm/rockchip/cdn-dp-core.c | 10
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_drv.c | 50
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_vop.c | 33
-rw-r--r--  drivers/gpu/drm/sti/sti_gdp.c | 12
-rw-r--r--  drivers/gpu/drm/sun4i/Makefile | 4
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_backend.c | 2
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_crtc.c | 70
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_crtc.h | 8
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_drv.c | 47
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_drv.h | 4
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_framebuffer.c | 1
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_layer.c | 32
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_layer.h | 4
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_rgb.c | 41
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_rgb.h | 2
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_tcon.c | 126
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_tcon.h | 3
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_tv.c | 27
-rw-r--r--  drivers/gpu/drm/tegra/gem.c | 8
-rw-r--r--  drivers/gpu/drm/tilcdc/tilcdc_crtc.c | 12
-rw-r--r--  drivers/gpu/drm/tilcdc/tilcdc_external.c | 68
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c | 49
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_vm.c | 10
-rw-r--r--  drivers/gpu/drm/ttm/ttm_object.c | 14
-rw-r--r--  drivers/gpu/drm/udl/udl_dmabuf.c | 8
-rw-r--r--  drivers/gpu/drm/udl/udl_transfer.c | 3
-rw-r--r--  drivers/gpu/drm/vc4/vc4_crtc.c | 16
-rw-r--r--  drivers/gpu/drm/vc4/vc4_dpi.c | 15
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_drv.h | 1
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_fb.c | 58
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_gem.c | 6
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_object.c | 4
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_plane.c | 72
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_ttm.c | 1
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c | 1
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fb.c | 58
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fence.c | 77
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c | 4
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 3
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.h | 3
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_prime.c | 8
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 4
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_surface.c | 31
-rw-r--r--  drivers/gpu/ipu-v3/Makefile | 6
-rw-r--r--  drivers/gpu/ipu-v3/ipu-common.c | 5
-rw-r--r--  drivers/hid/hid-core.c | 5
-rw-r--r--  drivers/hid/hid-ids.h | 4
-rw-r--r--  drivers/hid/hid-uclogic.c | 2
-rw-r--r--  drivers/hid/hid-xinmo.c | 1
-rw-r--r--  drivers/hid/wacom_sys.c | 18
-rw-r--r--  drivers/i2c/muxes/i2c-mux-pca954x.c | 34
-rw-r--r--  drivers/iio/accel/hid-sensor-accel-3d.c | 3
-rw-r--r--  drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c | 4
-rw-r--r--  drivers/iio/common/hid-sensors/hid-sensor-attributes.c | 10
-rw-r--r--  drivers/iio/gyro/bmg160_core.c | 12
-rw-r--r--  drivers/iio/industrialio-core.c | 7
-rw-r--r--  drivers/iio/pressure/st_pressure_core.c | 1
-rw-r--r--  drivers/infiniband/ulp/isert/ib_isert.c | 65
-rw-r--r--  drivers/infiniband/ulp/isert/ib_isert.h | 3
-rw-r--r--  drivers/input/joystick/xpad.c | 2
-rw-r--r--  drivers/irqchip/Kconfig | 1
-rw-r--r--  drivers/irqchip/irq-imx-gpcv2.c | 2
-rw-r--r--  drivers/irqchip/irq-mips-gic.c | 4
-rw-r--r--  drivers/isdn/capi/kcapi.c | 1
-rw-r--r--  drivers/md/dm-cache-metadata.c | 8
-rw-r--r--  drivers/md/dm-raid.c | 2
-rw-r--r--  drivers/md/dm-rq.c | 1
-rw-r--r--  drivers/md/dm-verity-fec.c | 18
-rw-r--r--  drivers/md/dm-verity-fec.h | 4
-rw-r--r--  drivers/media/v4l2-core/videobuf2-dma-contig.c | 4
-rw-r--r--  drivers/media/v4l2-core/videobuf2-dma-sg.c | 4
-rw-r--r--  drivers/media/v4l2-core/videobuf2-vmalloc.c | 4
-rw-r--r--  drivers/mmc/host/sdhci-of-at91.c | 11
-rw-r--r--  drivers/mmc/host/sdhci.c | 6
-rw-r--r--  drivers/net/can/ifi_canfd/ifi_canfd.c | 2
-rw-r--r--  drivers/net/can/rcar/rcar_can.c | 3
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_main.c | 5
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_nic.c | 23
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_ring.c | 1
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_ring.h | 3
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c | 4
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c | 4
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | 6
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 18
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c | 30
-rw-r--r--  drivers/net/ethernet/brocade/bna/bfa_ioc.c | 10
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_cmds.c | 9
-rw-r--r--  drivers/net/ethernet/ezchip/nps_enet.c | 4
-rw-r--r--  drivers/net/ethernet/faraday/ftgmac100.c | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c | 11
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c | 6
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c | 28
-rw-r--r--  drivers/net/ethernet/intel/e1000e/netdev.c | 2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 16
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lag.c | 5
-rw-r--r--  drivers/net/ethernet/moxa/moxart_ether.c | 20
-rw-r--r--  drivers/net/ethernet/moxa/moxart_ether.h | 1
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_common.c | 3
-rw-r--r--  drivers/net/ethernet/rocker/rocker_ofdpa.c | 11
-rw-r--r--  drivers/net/ethernet/ti/cpsw.c | 16
-rw-r--r--  drivers/net/irda/vlsi_ir.c | 8
-rw-r--r--  drivers/net/phy/mdio-boardinfo.c | 1
-rw-r--r--  drivers/net/phy/phy.c | 2
-rw-r--r--  drivers/net/team/team.c | 19
-rw-r--r--  drivers/net/usb/cdc_ether.c | 15
-rw-r--r--  drivers/net/usb/qmi_wwan.c | 2
-rw-r--r--  drivers/net/usb/r8152.c | 21
-rw-r--r--  drivers/net/usb/usbnet.c | 19
-rw-r--r--  drivers/net/virtio_net.c | 45
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c | 8
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c | 2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c | 3
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c | 2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/sta.c | 9
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/tx.c | 7
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/base.c | 6
-rw-r--r--  drivers/nvdimm/bus.c | 6
-rw-r--r--  drivers/nvdimm/claim.c | 10
-rw-r--r--  drivers/nvdimm/dimm_devs.c | 77
-rw-r--r--  drivers/nvme/host/core.c | 2
-rw-r--r--  drivers/nvme/host/fc.c | 2
-rw-r--r--  drivers/nvme/host/rdma.c | 2
-rw-r--r--  drivers/nvme/target/admin-cmd.c | 2
-rw-r--r--  drivers/nvme/target/io-cmd.c | 4
-rw-r--r--  drivers/nvme/target/loop.c | 2
-rw-r--r--  drivers/pci/dwc/Kconfig | 1
-rw-r--r--  drivers/pci/dwc/pcie-artpec6.c | 4
-rw-r--r--  drivers/pci/dwc/pcie-designware-plat.c | 4
-rw-r--r--  drivers/pci/host/pci-thunder-pem.c | 64
-rw-r--r--  drivers/pci/host/pcie-iproc-bcma.c | 24
-rw-r--r--  drivers/pci/host/pcie-iproc-platform.c | 19
-rw-r--r--  drivers/pci/host/pcie-iproc.h | 1
-rw-r--r--  drivers/pinctrl/core.c | 97
-rw-r--r--  drivers/pinctrl/freescale/pinctrl-imx.c | 2
-rw-r--r--  drivers/pinctrl/intel/pinctrl-cherryview.c | 26
-rw-r--r--  drivers/pinctrl/meson/pinctrl-meson-gxbb.c | 4
-rw-r--r--  drivers/pinctrl/pinctrl-single.c | 2
-rw-r--r--  drivers/pinctrl/pinctrl-st.c | 30
-rw-r--r--  drivers/pinctrl/qcom/pinctrl-ipq4019.c | 30
-rw-r--r--  drivers/pinctrl/qcom/pinctrl-msm.c | 4
-rw-r--r--  drivers/pinctrl/samsung/pinctrl-exynos.c | 80
-rw-r--r--  drivers/pinctrl/samsung/pinctrl-exynos.h | 11
-rw-r--r--  drivers/pinctrl/samsung/pinctrl-samsung.c | 13
-rw-r--r--  drivers/pinctrl/sh-pfc/pinctrl.c | 11
-rw-r--r--  drivers/pinctrl/ti/Kconfig | 2
-rw-r--r--  drivers/pinctrl/ti/pinctrl-ti-iodelay.c | 2
-rw-r--r--  drivers/ptp/ptp_kvm.c | 5
-rw-r--r--  drivers/pwm/pwm-lpss-pci.c | 10
-rw-r--r--  drivers/pwm/pwm-lpss-platform.c | 1
-rw-r--r--  drivers/pwm/pwm-lpss.c | 19
-rw-r--r--  drivers/pwm/pwm-lpss.h | 1
-rw-r--r--  drivers/pwm/pwm-rockchip.c | 40
-rw-r--r--  drivers/rapidio/devices/tsi721.c | 4
-rw-r--r--  drivers/rapidio/devices/tsi721.h | 4
-rw-r--r--  drivers/reset/core.c | 22
-rw-r--r--  drivers/s390/crypto/pkey_api.c | 53
-rw-r--r--  drivers/s390/net/qeth_core.h | 3
-rw-r--r--  drivers/s390/net/qeth_core_main.c | 5
-rw-r--r--  drivers/s390/net/qeth_l2_main.c | 5
-rw-r--r--  drivers/s390/net/qeth_l3_main.c | 20
-rw-r--r--  drivers/scsi/aacraid/aacraid.h | 11
-rw-r--r--  drivers/scsi/aacraid/commsup.c | 17
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_alua.c | 38
-rw-r--r--  drivers/scsi/hpsa.c | 1
-rw-r--r--  drivers/scsi/ipr.c | 7
-rw-r--r--  drivers/scsi/libsas/sas_ata.c | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc_debugfs.h | 22
-rw-r--r--  drivers/scsi/lpfc/lpfc_els.c | 3
-rw-r--r--  drivers/scsi/lpfc/lpfc_nvmet.c | 4
-rw-r--r--  drivers/scsi/qedf/qedf_fip.c | 3
-rw-r--r--  drivers/scsi/qedf/qedf_main.c | 1
-rw-r--r--  drivers/scsi/qedi/qedi_main.c | 1
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c | 10
-rw-r--r--  drivers/scsi/scsi_lib.c | 6
-rw-r--r--  drivers/scsi/sd.c | 23
-rw-r--r--  drivers/scsi/sg.c | 2
-rw-r--r--  drivers/scsi/sr.c | 6
-rw-r--r--  drivers/scsi/ufs/ufshcd-pltfrm.c | 4
-rw-r--r--  drivers/scsi/ufs/ufshcd.c | 2
-rw-r--r--  drivers/staging/android/ashmem.c | 1
-rw-r--r--  drivers/staging/android/ion/ion.c | 8
-rw-r--r--  drivers/target/iscsi/iscsi_target.c | 3
-rw-r--r--  drivers/target/iscsi/iscsi_target_configfs.c | 13
-rw-r--r--  drivers/target/iscsi/iscsi_target_parameters.c | 16
-rw-r--r--  drivers/target/iscsi/iscsi_target_util.c | 17
-rw-r--r--  drivers/target/iscsi/iscsi_target_util.h | 2
-rw-r--r--  drivers/target/target_core_alua.c | 136
-rw-r--r--  drivers/target/target_core_configfs.c | 2
-rw-r--r--  drivers/target/target_core_fabric_configfs.c | 5
-rw-r--r--  drivers/target/target_core_tpg.c | 4
-rw-r--r--  drivers/target/target_core_transport.c | 102
-rw-r--r--  drivers/target/target_core_user.c | 97
-rw-r--r--  drivers/thermal/cpu_cooling.c | 39
-rw-r--r--  drivers/thermal/devfreq_cooling.c | 14
-rw-r--r--  drivers/tty/serial/8250/Kconfig | 8
-rw-r--r--  drivers/tty/serial/amba-pl011.c | 23
-rw-r--r--  drivers/tty/serial/atmel_serial.c | 8
-rw-r--r--  drivers/tty/serial/mxs-auart.c | 2
-rw-r--r--  drivers/tty/tty_ldisc.c | 85
-rw-r--r--  drivers/tty/vt/keyboard.c | 1
-rw-r--r--  drivers/usb/core/hcd.c | 7
-rw-r--r--  drivers/usb/gadget/function/f_tcm.c | 2
-rw-r--r--  drivers/usb/host/xhci-plat.c | 1
-rw-r--r--  drivers/usb/host/xhci-ring.c | 3
-rw-r--r--  drivers/usb/host/xhci.c | 43
-rw-r--r--  drivers/usb/phy/phy-isp1301.c | 2
-rw-r--r--  drivers/video/fbdev/efifb.c | 66
-rw-r--r--  drivers/video/fbdev/omap/omapfb_main.c | 15
-rw-r--r--  drivers/video/fbdev/ssd1307fb.c | 24
-rw-r--r--  drivers/video/fbdev/xen-fbfront.c | 4
-rw-r--r--  drivers/virtio/virtio.c | 6
-rw-r--r--  drivers/virtio/virtio_balloon.c | 19
-rw-r--r--  drivers/virtio/virtio_pci_common.c | 374
-rw-r--r--  drivers/virtio/virtio_pci_common.h | 43
-rw-r--r--  drivers/virtio/virtio_pci_legacy.c | 8
-rw-r--r--  drivers/virtio/virtio_pci_modern.c | 8
-rw-r--r--  drivers/xen/xenbus/xenbus_dev_frontend.c | 4
-rw-r--r--  fs/btrfs/ctree.h | 2
-rw-r--r--  fs/btrfs/disk-io.c | 2
-rw-r--r--  fs/btrfs/extent_io.c | 46
-rw-r--r--  fs/btrfs/inode.c | 28
-rw-r--r--  fs/btrfs/qgroup.c | 10
-rw-r--r--  fs/btrfs/send.c | 7
-rw-r--r--  fs/btrfs/super.c | 3
-rw-r--r--  fs/btrfs/volumes.c | 2
-rw-r--r--  fs/cifs/cifsfs.c | 87
-rw-r--r--  fs/cifs/cifsfs.h | 5
-rw-r--r--  fs/cifs/cifsglob.h | 19
-rw-r--r--  fs/cifs/cifssmb.c | 4
-rw-r--r--  fs/cifs/connect.c | 16
-rw-r--r--  fs/cifs/file.c | 6
-rw-r--r--  fs/cifs/ioctl.c | 66
-rw-r--r--  fs/cifs/smb2misc.c | 46
-rw-r--r--  fs/cifs/smb2ops.c | 37
-rw-r--r--  fs/cifs/smb2pdu.c | 23
-rw-r--r--  fs/cifs/smb2proto.h | 7
-rw-r--r--  fs/cifs/smb2transport.c | 55
-rw-r--r--  fs/cifs/transport.c | 2
-rw-r--r--  fs/dax.c | 35
-rw-r--r--  fs/ext4/ext4.h | 1
-rw-r--r--  fs/ext4/file.c | 2
-rw-r--r--  fs/ext4/inode.c | 41
-rw-r--r--  fs/ext4/namei.c | 2
-rw-r--r--  fs/ext4/symlink.c | 3
-rw-r--r--  fs/hugetlbfs/inode.c | 40
-rw-r--r--  fs/namei.c | 3
-rw-r--r--  fs/nfs/dir.c | 9
-rw-r--r--  fs/nfs/filelayout/filelayout.c | 151
-rw-r--r--  fs/nfs/filelayout/filelayout.h | 19
-rw-r--r--  fs/nfs/flexfilelayout/flexfilelayoutdev.c | 4
-rw-r--r--  fs/nfs/nfs4proc.c | 9
-rw-r--r--  fs/nfsd/nfsctl.c | 43
-rw-r--r--  fs/nfsd/nfsproc.c | 1
-rw-r--r--  fs/nfsd/nfssvc.c | 28
-rw-r--r--  fs/orangefs/devorangefs-req.c | 9
-rw-r--r--  fs/orangefs/orangefs-kernel.h | 1
-rw-r--r--  fs/orangefs/super.c | 32
-rw-r--r--  fs/proc/proc_sysctl.c | 1
-rw-r--r--  fs/proc/task_mmu.c | 9
-rw-r--r--  fs/stat.c | 86
-rw-r--r--  fs/sysfs/file.c | 6
-rw-r--r--  fs/userfaultfd.c | 2
-rw-r--r--  fs/xfs/libxfs/xfs_dir2_priv.h | 3
-rw-r--r--  fs/xfs/libxfs/xfs_dir2_sf.c | 63
-rw-r--r--  fs/xfs/libxfs/xfs_inode_fork.c | 35
-rw-r--r--  fs/xfs/libxfs/xfs_inode_fork.h | 2
-rw-r--r--  fs/xfs/xfs_bmap_util.c | 10
-rw-r--r--  fs/xfs/xfs_inode.c | 19
-rw-r--r--  fs/xfs/xfs_iops.c | 14
-rw-r--r--  fs/xfs/xfs_itable.c | 2
-rw-r--r--  include/asm-generic/sections.h | 6
-rw-r--r--  include/asm-generic/vmlinux.lds.h | 6
-rw-r--r--  include/drm/bridge/dw_hdmi.h | 68
-rw-r--r--  include/drm/drmP.h | 5
-rw-r--r--  include/drm/drm_atomic.h | 2
-rw-r--r--  include/drm/drm_atomic_helper.h | 3
-rw-r--r--  include/drm/drm_connector.h | 6
-rw-r--r--  include/drm/drm_crtc.h | 12
-rw-r--r--  include/drm/drm_crtc_helper.h | 3
-rw-r--r--  include/drm/drm_ioctl.h | 116
-rw-r--r--  include/drm/drm_modeset_helper_vtables.h | 70
-rw-r--r--  include/drm/drm_modeset_lock.h | 5
-rw-r--r--  include/drm/drm_of.h | 13
-rw-r--r--  include/drm/drm_panel.h | 2
-rw-r--r--  include/drm/drm_sysfs.h | 8
-rw-r--r--  include/drm/ttm/ttm_bo_api.h | 11
-rw-r--r--  include/drm/ttm/ttm_bo_driver.h | 9
-rw-r--r--  include/drm/ttm/ttm_object.h | 5
-rw-r--r--  include/drm/ttm/ttm_placement.h | 1
-rw-r--r--  include/kvm/arm_vgic.h | 1
-rw-r--r--  include/linux/blk-mq.h | 2
-rw-r--r--  include/linux/blkdev.h | 33
-rw-r--r--  include/linux/cgroup.h | 21
-rw-r--r--  include/linux/clockchips.h | 2
-rw-r--r--  include/linux/dma-buf.h | 22
-rw-r--r--  include/linux/elevator.h | 2
-rw-r--r--  include/linux/irqchip/arm-gic.h | 3
-rw-r--r--  include/linux/kasan.h | 3
-rw-r--r--  include/linux/kvm_host.h | 4
-rw-r--r--  include/linux/memcontrol.h | 6
-rw-r--r--  include/linux/mfd/cros_ec.h | 3
-rw-r--r--  include/linux/mm.h | 2
-rw-r--r--  include/linux/mmu_notifier.h | 13
-rw-r--r--  include/linux/nvme.h | 16
-rw-r--r--  include/linux/pinctrl/pinctrl.h | 3
-rw-r--r--  include/linux/reset.h | 22
-rw-r--r--  include/linux/sched.h | 4
-rw-r--r--  include/linux/sched/clock.h | 13
-rw-r--r--  include/linux/stat.h | 1
-rw-r--r--  include/linux/uio.h | 6
-rw-r--r--  include/linux/virtio.h | 1
-rw-r--r--  include/net/sctp/sctp.h | 22
-rw-r--r--  include/net/sctp/structs.h | 11
-rw-r--r--  include/target/target_core_base.h | 10
-rw-r--r--  include/uapi/drm/drm.h | 3
-rw-r--r--  include/uapi/drm/etnaviv_drm.h | 8
-rw-r--r--  include/uapi/drm/msm_drm.h | 1
-rw-r--r--  include/uapi/linux/Kbuild | 1
-rw-r--r--  include/uapi/linux/media-bus-format.h | 13
-rw-r--r--  include/uapi/linux/stat.h | 5
-rw-r--r--  include/uapi/linux/virtio_pci.h | 2
-rw-r--r--  init/main.c | 2
-rw-r--r--  kernel/audit.c | 67
-rw-r--r--  kernel/audit.h | 8
-rw-r--r--  kernel/auditsc.c | 25
-rw-r--r--  kernel/bpf/core.c | 12
-rw-r--r--  kernel/bpf/verifier.c | 64
-rw-r--r--  kernel/cgroup/cgroup.c | 9
-rw-r--r--  kernel/irq/affinity.c | 20
-rw-r--r--  kernel/kthread.c | 3
-rw-r--r--  kernel/padata.c | 5
-rw-r--r--  kernel/ptrace.c | 14
-rw-r--r--  kernel/sched/clock.c | 46
-rw-r--r--  kernel/sysctl.c | 3
-rw-r--r--  kernel/trace/ftrace.c | 20
-rw-r--r--  kernel/trace/ring_buffer.c | 8
-rw-r--r--  lib/iov_iter.c | 63
-rw-r--r--  lib/syscall.c | 1
-rw-r--r--  lib/test_kasan.c | 10
-rw-r--r--  mm/huge_memory.c | 99
-rw-r--r--  mm/hugetlb.c | 10
-rw-r--r--  mm/internal.h | 7
-rw-r--r--  mm/kasan/kasan.h | 5
-rw-r--r--  mm/kasan/report.c | 36
-rw-r--r--  mm/kmemleak.c | 2
-rw-r--r--  mm/mempolicy.c | 20
-rw-r--r--  mm/migrate.c | 7
-rw-r--r--  mm/page_alloc.c | 11
-rw-r--r--  mm/page_vma_mapped.c | 15
-rw-r--r--  mm/rmap.c | 4
-rw-r--r--  mm/swap.c | 27
-rw-r--r--  mm/swap_cgroup.c | 2
-rw-r--r--  mm/vmstat.c | 19
-rw-r--r--  mm/workingset.c | 2
-rw-r--r--  mm/z3fold.c | 9
-rw-r--r--  mm/zsmalloc.c | 2
-rw-r--r--  net/bridge/br_device.c | 20
-rw-r--r--  net/bridge/br_if.c | 1
-rw-r--r--  net/bridge/br_multicast.c | 7
-rw-r--r--  net/bridge/br_netlink.c | 7
-rw-r--r--  net/bridge/br_private.h | 5
-rw-r--r--  net/core/datagram.c | 23
-rw-r--r--  net/core/dev.c | 1
-rw-r--r--  net/core/flow_dissector.c | 2
-rw-r--r--  net/core/neighbour.c | 3
-rw-r--r--  net/core/secure_seq.c | 31
-rw-r--r--  net/core/sysctl_net_core.c | 6
-rw-r--r--  net/ipv4/ipconfig.c | 2
-rw-r--r--  net/ipv4/netfilter/ipt_CLUSTERIP.c | 2
-rw-r--r--  net/ipv4/netfilter/nf_nat_snmp_basic.c | 20
-rw-r--r--  net/ipv4/ping.c | 5
-rw-r--r--  net/ipv4/route.c | 2
-rw-r--r--  net/ipv4/tcp.c | 1
-rw-r--r--  net/ipv4/tcp_input.c | 61
-rw-r--r--  net/ipv4/tcp_output.c | 4
-rw-r--r--  net/ipv4/tcp_recovery.c | 3
-rw-r--r--  net/ipv6/addrconf.c | 11
-rw-r--r--  net/kcm/kcmsock.c | 6
-rw-r--r--  net/l2tp/l2tp_core.c | 160
-rw-r--r--  net/l2tp/l2tp_core.h | 9
-rw-r--r--  net/l2tp/l2tp_debugfs.c | 10
-rw-r--r--  net/l2tp/l2tp_eth.c | 10
-rw-r--r--  net/l2tp/l2tp_ip.c | 22
-rw-r--r--  net/l2tp/l2tp_ip6.c | 23
-rw-r--r--  net/l2tp/l2tp_netlink.c | 52
-rw-r--r--  net/l2tp/l2tp_ppp.c | 103
-rw-r--r--  net/mac80211/iface.c | 3
-rw-r--r--  net/netfilter/nf_conntrack_ecache.c | 2
-rw-r--r--  net/netfilter/nf_conntrack_expect.c | 4
-rw-r--r--  net/netfilter/nf_conntrack_extend.c | 13
-rw-r--r--  net/netfilter/nf_conntrack_helper.c | 17
-rw-r--r--  net/netfilter/nf_conntrack_netlink.c | 42
-rw-r--r--  net/netfilter/nf_nat_core.c | 2
-rw-r--r--  net/netfilter/nf_nat_redirect.c | 2
-rw-r--r--  net/netfilter/nfnetlink_cthelper.c | 287
-rw-r--r--  net/netfilter/nfnetlink_cttimeout.c | 2
-rw-r--r--  net/netfilter/nfnetlink_queue.c | 9
-rw-r--r--  net/netfilter/nft_hash.c | 10
-rw-r--r--  net/netfilter/xt_TCPMSS.c | 6
-rw-r--r--  net/netfilter/xt_TPROXY.c | 5
-rw-r--r--  net/openvswitch/conntrack.c | 4
-rw-r--r--  net/openvswitch/flow.c | 10
-rw-r--r--  net/packet/af_packet.c | 8
-rw-r--r--  net/sched/sch_generic.c | 2
-rw-r--r--  net/sctp/associola.c | 13
-rw-r--r--  net/sctp/input.c | 4
-rw-r--r--  net/sctp/output.c | 69
-rw-r--r--  net/sctp/outqueue.c | 3
-rw-r--r--  net/sctp/proc.c | 4
-rw-r--r--  net/sctp/sm_make_chunk.c | 9
-rw-r--r--  net/sctp/sm_statefuns.c | 6
-rw-r--r--  net/sctp/socket.c | 21
-rw-r--r--  net/sctp/stream.c | 43
-rw-r--r--  net/sctp/transport.c | 19
-rw-r--r--  net/sunrpc/svcsock.c | 1
-rw-r--r--  net/sunrpc/xprtrdma/svc_rdma_transport.c | 1
-rw-r--r--  net/wireless/sysfs.c | 10
-rw-r--r--  net/xfrm/xfrm_user.c | 9
-rw-r--r--  samples/statx/test-statx.c | 12
-rw-r--r--  scripts/Kbuild.include | 4
-rw-r--r--  scripts/Makefile.lib | 2
-rw-r--r--  scripts/kconfig/gconf.c | 2
-rw-r--r--  sound/core/seq/seq_fifo.c | 4
-rw-r--r--  sound/pci/hda/patch_realtek.c | 12
-rw-r--r--  sound/soc/atmel/atmel-classd.c | 2
-rw-r--r--  sound/soc/codecs/hdac_hdmi.c | 16
-rw-r--r--  sound/soc/codecs/rt5665.c | 10
-rw-r--r--  sound/soc/codecs/rt5665.h | 2
-rw-r--r--  sound/soc/codecs/wm_adsp.c | 9
-rw-r--r--  sound/soc/generic/simple-card-utils.c | 1
-rw-r--r--  sound/soc/intel/skylake/skl-topology.c | 2
-rw-r--r--  sound/soc/mediatek/Kconfig | 2
-rw-r--r--  sound/soc/sh/rcar/cmd.c | 36
-rw-r--r--  sound/soc/sh/rcar/dma.c | 18
-rw-r--r--  sound/soc/sh/rcar/ssiu.c | 6
-rw-r--r--  sound/soc/soc-core.c | 8
-rw-r--r--  sound/soc/sti/uniperif_reader.c | 3
-rw-r--r--  sound/soc/sunxi/sun8i-codec.c | 67
-rw-r--r--  tools/include/linux/filter.h | 10
-rw-r--r--  tools/perf/util/annotate.c | 6
-rw-r--r--  tools/power/cpupower/utils/helpers/cpuid.c | 1
-rw-r--r--  tools/power/x86/turbostat/turbostat.8 | 2
-rw-r--r--  tools/power/x86/turbostat/turbostat.c | 26
-rw-r--r--  tools/testing/selftests/bpf/Makefile | 9
-rw-r--r--  tools/testing/selftests/bpf/test_verifier.c | 290
-rw-r--r--  tools/testing/selftests/powerpc/Makefile | 10
-rw-r--r--  virt/kvm/arm/vgic/vgic-init.c | 19
-rw-r--r--  virt/kvm/arm/vgic/vgic-mmio-v2.c | 20
-rw-r--r--  virt/kvm/arm/vgic/vgic-v2.c | 23
-rw-r--r--  virt/kvm/arm/vgic/vgic.h | 11
-rw-r--r--  virt/kvm/eventfd.c | 3
-rw-r--r--  virt/kvm/kvm_main.c | 44
970 files changed, 24336 insertions(+), 10164 deletions(-)
diff --git a/.mailmap b/.mailmap
index 67dc22ffc9a8..1d6f4e7280dc 100644
--- a/.mailmap
+++ b/.mailmap
@@ -99,6 +99,8 @@ Linas Vepstas <linas@austin.ibm.com>
 Linus Lüssing <linus.luessing@c0d3.blue> <linus.luessing@web.de>
 Linus Lüssing <linus.luessing@c0d3.blue> <linus.luessing@ascom.ch>
 Mark Brown <broonie@sirena.org.uk>
+Martin Kepplinger <martink@posteo.de> <martin.kepplinger@theobroma-systems.com>
+Martin Kepplinger <martink@posteo.de> <martin.kepplinger@ginzinger.com>
 Matthieu CASTET <castet.matthieu@free.fr>
 Mauro Carvalho Chehab <mchehab@kernel.org> <mchehab@brturbo.com.br>
 Mauro Carvalho Chehab <mchehab@kernel.org> <maurochehab@gmail.com>
@@ -171,6 +173,7 @@ Vlad Dogaru <ddvlad@gmail.com> <vlad.dogaru@intel.com>
 Vladimir Davydov <vdavydov.dev@gmail.com> <vdavydov@virtuozzo.com>
 Vladimir Davydov <vdavydov.dev@gmail.com> <vdavydov@parallels.com>
 Takashi YOSHII <takashi.yoshii.zj@renesas.com>
+Yakir Yang <kuankuan.y@gmail.com> <ykk@rock-chips.com>
 Yusuke Goda <goda.yusuke@renesas.com>
 Gustavo Padovan <gustavo@las.ic.unicamp.br>
 Gustavo Padovan <padovan@profusion.mobi>
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 2ba45caabada..facc20a3f962 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -1725,6 +1725,12 @@
 			kernel and module base offset ASLR (Address Space
 			Layout Randomization).
 
+	kasan_multi_shot
+			[KNL] Enforce KASAN (Kernel Address Sanitizer) to print
+			report on every invalid memory access. Without this
+			parameter KASAN will print report only for the first
+			invalid access.
+
 	keepinitrd	[HW,ARM]
 
 	kernelcore=	[KNL,X86,IA-64,PPC]
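
A usage sketch, not taken from the patch itself: like any other boot option, kasan_multi_shot is appended to the kernel command line by the bootloader; the console and root arguments below are placeholders.

    console=ttyS0,115200 root=/dev/sda1 kasan_multi_shot
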
diff --git a/Documentation/devicetree/bindings/display/amlogic,meson-dw-hdmi.txt b/Documentation/devicetree/bindings/display/amlogic,meson-dw-hdmi.txt
new file mode 100644
index 000000000000..7f040edc16fe
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/amlogic,meson-dw-hdmi.txt
@@ -0,0 +1,111 @@
+Amlogic specific extensions to the Synopsys DesignWare HDMI Controller
+======================================================================
+
+The Amlogic Meson Synopsys DesignWare integration is composed of:
+- A Synopsys DesignWare HDMI Controller IP
+- A TOP control block controlling the Clocks and PHY
+- A custom HDMI PHY to convert video to a TMDS signal
+ ___________________________________
+|            HDMI TOP              |<= HPD
+|___________________________________|
+|                 |                 |
+|  Synopsys HDMI  |    HDMI PHY     |=> TMDS
+|   Controller    |_________________|
+|___________________________________|<=> DDC
+
+The HDMI TOP block only supports HPD sensing.
+The Synopsys HDMI Controller interrupt is routed through the
+TOP Block interrupt.
+Communication to the TOP Block and the Synopsys HDMI Controller is done
+via a pair of dedicated addr+read/write registers.
+The HDMI PHY is configured by registers in the HHI register block.
+
+Pixel data arrives in 4:4:4 format from the VENC block and the VPU HDMI mux
+selects either the ENCI encoder for the 576i or 480i formats or the ENCP
+encoder for all the other formats including interlaced HD formats.
+
+The VENC uses a DVI encoder on top of the ENCI or ENCP encoders to generate
+DVI timings for the HDMI controller.
+
+The Amlogic Meson GXBB, GXL and GXM SoC families embed the Synopsys DesignWare
+HDMI TX IP version 2.01a with HDCP and I2C & S/PDIF
+audio source interfaces.
+
+Required properties:
+- compatible: value should be different for each SoC family as:
+	- GXBB (S905) : "amlogic,meson-gxbb-dw-hdmi"
+	- GXL (S905X, S905D) : "amlogic,meson-gxl-dw-hdmi"
+	- GXM (S912) : "amlogic,meson-gxm-dw-hdmi"
+	followed by the common "amlogic,meson-gx-dw-hdmi"
+- reg: Physical base address and length of the controller's registers.
+- interrupts: The HDMI interrupt number
+- clocks, clock-names: must have the phandles to the HDMI iahb and isfr clocks,
+	and the Amlogic Meson venci clocks as described in
+	Documentation/devicetree/bindings/clock/clock-bindings.txt;
+	the clocks are SoC specific, the clock-names should be "iahb", "isfr", "venci"
+- resets, reset-names: must have the phandles to the HDMI apb, glue and phy
+	resets as described in
+	Documentation/devicetree/bindings/reset/reset.txt;
+	the reset-names should be "hdmitx_apb", "hdmitx", "hdmitx_phy"
+
+Required nodes:
+
+The connections to the HDMI ports are modeled using the OF graph
+bindings specified in Documentation/devicetree/bindings/graph.txt.
+
+The following table lists for each supported model the port number
+corresponding to each HDMI output and input.
+
+		Port 0		Port 1
+-----------------------------------------
+ S905 (GXBB)	VENC Input	TMDS Output
+ S905X (GXL)	VENC Input	TMDS Output
+ S905D (GXL)	VENC Input	TMDS Output
+ S912 (GXM)	VENC Input	TMDS Output
+
+Example:
+
+hdmi-connector {
+	compatible = "hdmi-connector";
+	type = "a";
+
+	port {
+		hdmi_connector_in: endpoint {
+			remote-endpoint = <&hdmi_tx_tmds_out>;
+		};
+	};
+};
+
+hdmi_tx: hdmi-tx@c883a000 {
+	compatible = "amlogic,meson-gxbb-dw-hdmi", "amlogic,meson-gx-dw-hdmi";
+	reg = <0x0 0xc883a000 0x0 0x1c>;
+	interrupts = <GIC_SPI 57 IRQ_TYPE_EDGE_RISING>;
+	resets = <&reset RESET_HDMITX_CAPB3>,
+		 <&reset RESET_HDMI_SYSTEM_RESET>,
+		 <&reset RESET_HDMI_TX>;
+	reset-names = "hdmitx_apb", "hdmitx", "hdmitx_phy";
+	clocks = <&clkc CLKID_HDMI_PCLK>,
+		 <&clkc CLKID_CLK81>,
+		 <&clkc CLKID_GCLK_VENCI_INT0>;
+	clock-names = "isfr", "iahb", "venci";
+	#address-cells = <1>;
+	#size-cells = <0>;
+
+	/* VPU VENC Input */
+	hdmi_tx_venc_port: port@0 {
+		reg = <0>;
+
+		hdmi_tx_in: endpoint {
+			remote-endpoint = <&hdmi_tx_out>;
+		};
+	};
+
+	/* TMDS Output */
+	hdmi_tx_tmds_port: port@1 {
+		reg = <1>;
+
+		hdmi_tx_tmds_out: endpoint {
+			remote-endpoint = <&hdmi_connector_in>;
+		};
+	};
+};
diff --git a/Documentation/devicetree/bindings/display/mediatek/mediatek,disp.txt b/Documentation/devicetree/bindings/display/mediatek/mediatek,disp.txt
index 708f5664a316..383183a89164 100644
--- a/Documentation/devicetree/bindings/display/mediatek/mediatek,disp.txt
+++ b/Documentation/devicetree/bindings/display/mediatek/mediatek,disp.txt
@@ -40,6 +40,7 @@ Required properties (all function blocks):
40 "mediatek,<chip>-dpi" - DPI controller, see mediatek,dpi.txt 40 "mediatek,<chip>-dpi" - DPI controller, see mediatek,dpi.txt
41 "mediatek,<chip>-disp-mutex" - display mutex 41 "mediatek,<chip>-disp-mutex" - display mutex
42 "mediatek,<chip>-disp-od" - overdrive 42 "mediatek,<chip>-disp-od" - overdrive
43 the supported chips are mt2701 and mt8173.
43- reg: Physical base address and length of the function block register space 44- reg: Physical base address and length of the function block register space
44- interrupts: The interrupt signal from the function block (required, except for 45- interrupts: The interrupt signal from the function block (required, except for
45 merge and split function blocks). 46 merge and split function blocks).
@@ -54,6 +55,7 @@ Required properties (DMA function blocks):
54 "mediatek,<chip>-disp-ovl" 55 "mediatek,<chip>-disp-ovl"
55 "mediatek,<chip>-disp-rdma" 56 "mediatek,<chip>-disp-rdma"
56 "mediatek,<chip>-disp-wdma" 57 "mediatek,<chip>-disp-wdma"
58 the supported chips are mt2701 and mt8173.
57- larb: Should contain a phandle pointing to the local arbiter device as defined 59- larb: Should contain a phandle pointing to the local arbiter device as defined
58 in Documentation/devicetree/bindings/memory-controllers/mediatek,smi-larb.txt 60 in Documentation/devicetree/bindings/memory-controllers/mediatek,smi-larb.txt
59- iommus: Should point to the respective IOMMU block with master port as 61- iommus: Should point to the respective IOMMU block with master port as
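
For illustration only, not part of the patch: with the chip note added above, an mt2701 DMA function block node would use the chip-specific compatible roughly as in this sketch; the unit address, interrupt number, clock, iommu and larb phandles are assumptions.

ovl0: ovl@14007000 {
	compatible = "mediatek,mt2701-disp-ovl";
	reg = <0 0x14007000 0 0x1000>;
	interrupts = <GIC_SPI 165 IRQ_TYPE_LEVEL_LOW>;
	clocks = <&mmsys CLK_MM_DISP_OVL>;		/* assumed clock phandle */
	iommus = <&iommu MT2701_M4U_PORT_DISP_OVL_0>;	/* assumed master port */
	mediatek,larb = <&larb0>;			/* assumed local arbiter label */
};
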
diff --git a/Documentation/devicetree/bindings/display/mediatek/mediatek,dsi.txt b/Documentation/devicetree/bindings/display/mediatek/mediatek,dsi.txt
index 2b1585a34b85..fadf327c7cdf 100644
--- a/Documentation/devicetree/bindings/display/mediatek/mediatek,dsi.txt
+++ b/Documentation/devicetree/bindings/display/mediatek/mediatek,dsi.txt
@@ -7,6 +7,7 @@ channel output.
 
 Required properties:
 - compatible: "mediatek,<chip>-dsi"
+  the supported chips are mt2701 and mt8173.
 - reg: Physical base address and length of the controller's registers
 - interrupts: The interrupt signal from the function block.
 - clocks: device clocks
@@ -25,6 +26,7 @@ The MIPI TX configuration module controls the MIPI D-PHY.
 
 Required properties:
 - compatible: "mediatek,<chip>-mipi-tx"
+  the supported chips are mt2701 and mt8173.
 - reg: Physical base address and length of the controller's registers
 - clocks: PLL reference clock
 - clock-output-names: name of the output clock line to the DSI encoder
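
Likewise for illustration only: a MIPI TX node matching the added chip note might look like the sketch below; the register address and clock phandle are assumptions.

mipi_tx0: mipi-dphy@10010000 {
	compatible = "mediatek,mt2701-mipi-tx";
	reg = <0 0x10010000 0 0x90>;		/* assumed base address */
	clocks = <&clk26m>;			/* PLL reference clock, assumed phandle */
	clock-output-names = "mipi_tx0_pll";
	#clock-cells = <0>;
	#phy-cells = <0>;
};
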
diff --git a/Documentation/devicetree/bindings/display/panel/ampire,am-480272h3tmqw-t01h.txt b/Documentation/devicetree/bindings/display/panel/ampire,am-480272h3tmqw-t01h.txt
new file mode 100644
index 000000000000..6812280cb109
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/ampire,am-480272h3tmqw-t01h.txt
@@ -0,0 +1,26 @@
+Ampire AM-480272H3TMQW-T01H 4.3" WQVGA TFT LCD panel
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
+
+Required properties:
+- compatible: should be "ampire,am-480272h3tmqw-t01h"
+
+Optional properties:
+- power-supply: regulator to provide the supply voltage
+- enable-gpios: GPIO pin to enable or disable the panel
+- backlight: phandle of the backlight device attached to the panel
+
+Optional nodes:
+- Video port for RGB input.
+
+Example:
+	panel_rgb: panel-rgb {
+		compatible = "ampire,am-480272h3tmqw-t01h";
+		enable-gpios = <&gpioa 8 1>;
+		port {
+			panel_in_rgb: endpoint {
+				remote-endpoint = <&controller_out_rgb>;
+			};
+		};
+	};
diff --git a/Documentation/devicetree/bindings/display/panel/samsung,s6e3ha2.txt b/Documentation/devicetree/bindings/display/panel/samsung,s6e3ha2.txt
new file mode 100644
index 000000000000..18854f4c8376
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/samsung,s6e3ha2.txt
@@ -0,0 +1,28 @@
+Samsung S6E3HA2 5.7" 1440x2560 AMOLED panel
+
+Required properties:
+  - compatible: "samsung,s6e3ha2"
+  - reg: the virtual channel number of a DSI peripheral
+  - vdd3-supply: I/O voltage supply
+  - vci-supply: voltage supply for analog circuits
+  - reset-gpios: a GPIO spec for the reset pin (active low)
+  - enable-gpios: a GPIO spec for the panel enable pin (active high)
+
+Optional properties:
+  - te-gpios: a GPIO spec for the tearing effect synchronization signal
+    gpio pin (active high)
+
+Example:
+&dsi {
+	...
+
+	panel@0 {
+		compatible = "samsung,s6e3ha2";
+		reg = <0>;
+		vdd3-supply = <&ldo27_reg>;
+		vci-supply = <&ldo28_reg>;
+		reset-gpios = <&gpg0 0 GPIO_ACTIVE_LOW>;
+		enable-gpios = <&gpf1 5 GPIO_ACTIVE_HIGH>;
+		te-gpios = <&gpf1 3 GPIO_ACTIVE_HIGH>;
+	};
+};
diff --git a/Documentation/devicetree/bindings/display/panel/sitronix,st7789v.txt b/Documentation/devicetree/bindings/display/panel/sitronix,st7789v.txt
new file mode 100644
index 000000000000..c6995dde641b
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/sitronix,st7789v.txt
@@ -0,0 +1,37 @@
+Sitronix ST7789V RGB panel with SPI control bus
+
+Required properties:
+  - compatible: "sitronix,st7789v"
+  - reg: Chip select of the panel on the SPI bus
+  - reset-gpios: a GPIO phandle for the reset pin
+  - power-supply: phandle of the regulator that provides the supply voltage
+
+Optional properties:
+  - backlight: phandle to the backlight used
+
+The generic bindings for SPI slaves documented in [1] also apply.
+
+The device node can contain one 'port' child node with one child
+'endpoint' node, according to the bindings defined in [2]. This
+node should describe the panel's video bus.
+
+[1]: Documentation/devicetree/bindings/spi/spi-bus.txt
+[2]: Documentation/devicetree/bindings/graph.txt
+
+Example:
+
+panel@0 {
+	compatible = "sitronix,st7789v";
+	reg = <0>;
+	reset-gpios = <&pio 6 11 GPIO_ACTIVE_LOW>;
+	backlight = <&pwm_bl>;
+	spi-max-frequency = <100000>;
+	spi-cpol;
+	spi-cpha;
+
+	port {
+		panel_input: endpoint {
+			remote-endpoint = <&tcon0_out_panel>;
+		};
+	};
+};
diff --git a/Documentation/devicetree/bindings/display/panel/winstar,wf35ltiacd.txt b/Documentation/devicetree/bindings/display/panel/winstar,wf35ltiacd.txt
new file mode 100644
index 000000000000..2a7e6e3ba64c
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/winstar,wf35ltiacd.txt
@@ -0,0 +1,48 @@
+Winstar Display Corporation 3.5" QVGA (320x240) TFT LCD panel
+
+Required properties:
+- compatible: should be "winstar,wf35ltiacd"
+- power-supply: regulator to provide the VCC supply voltage (3.3 volts)
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
+
+Example:
+	backlight: backlight {
+		compatible = "pwm-backlight";
+		pwms = <&hlcdc_pwm 0 50000 PWM_POLARITY_INVERTED>;
+		brightness-levels = <0 31 63 95 127 159 191 223 255>;
+		default-brightness-level = <191>;
+		power-supply = <&bl_reg>;
+	};
+
+	bl_reg: backlight_regulator {
+		compatible = "regulator-fixed";
+		regulator-name = "backlight-power-supply";
+		regulator-min-microvolt = <5000000>;
+		regulator-max-microvolt = <5000000>;
+	};
+
+	panel: panel {
+		compatible = "winstar,wf35ltiacd", "simple-panel";
+		backlight = <&backlight>;
+		power-supply = <&panel_reg>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		port {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			panel_input: endpoint {
+				remote-endpoint = <&hlcdc_panel_output>;
+			};
+		};
+	};
+
+	panel_reg: panel_regulator {
+		compatible = "regulator-fixed";
+		regulator-name = "panel-power-supply";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+	};
diff --git a/Documentation/devicetree/bindings/display/rockchip/dw_mipi_dsi_rockchip.txt b/Documentation/devicetree/bindings/display/rockchip/dw_mipi_dsi_rockchip.txt
index 1d722f5055ab..543b07435f4f 100644
--- a/Documentation/devicetree/bindings/display/rockchip/dw_mipi_dsi_rockchip.txt
+++ b/Documentation/devicetree/bindings/display/rockchip/dw_mipi_dsi_rockchip.txt
@@ -17,9 +17,12 @@ Required properties:
17 17
18Optional properties: 18Optional properties:
19- power-domains: a phandle to mipi dsi power domain node. 19- power-domains: a phandle to mipi dsi power domain node.
20- resets: list of phandle + reset specifier pairs, as described in [3].
21- reset-names: string reset name, must be "apb".
20 22
21[1] Documentation/devicetree/bindings/clock/clock-bindings.txt 23[1] Documentation/devicetree/bindings/clock/clock-bindings.txt
22[2] Documentation/devicetree/bindings/media/video-interfaces.txt 24[2] Documentation/devicetree/bindings/media/video-interfaces.txt
25[3] Documentation/devicetree/bindings/reset/reset.txt
23 26
24Example: 27Example:
25 mipi_dsi: mipi@ff960000 { 28 mipi_dsi: mipi@ff960000 {
@@ -30,6 +33,8 @@ Example:
30 interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>; 33 interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>;
31 clocks = <&cru SCLK_MIPI_24M>, <&cru PCLK_MIPI_DSI0>; 34 clocks = <&cru SCLK_MIPI_24M>, <&cru PCLK_MIPI_DSI0>;
32 clock-names = "ref", "pclk"; 35 clock-names = "ref", "pclk";
36 resets = <&cru SRST_MIPIDSI0>;
37 reset-names = "apb";
33 rockchip,grf = <&grf>; 38 rockchip,grf = <&grf>;
34 status = "okay"; 39 status = "okay";
35 40
diff --git a/Documentation/devicetree/bindings/rng/omap_rng.txt b/Documentation/devicetree/bindings/rng/omap_rng.txt
index 471477299ece..9cf7876ab434 100644
--- a/Documentation/devicetree/bindings/rng/omap_rng.txt
+++ b/Documentation/devicetree/bindings/rng/omap_rng.txt
@@ -12,7 +12,8 @@ Required properties:
12- reg : Offset and length of the register set for the module 12- reg : Offset and length of the register set for the module
13- interrupts : the interrupt number for the RNG module. 13- interrupts : the interrupt number for the RNG module.
14 Used for "ti,omap4-rng" and "inside-secure,safexcel-eip76" 14 Used for "ti,omap4-rng" and "inside-secure,safexcel-eip76"
15- clocks: the trng clock source 15- clocks: the trng clock source. Only mandatory for the
16 "inside-secure,safexcel-eip76" compatible.
16 17
17Example: 18Example:
18/* AM335x */ 19/* AM335x */
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index 4686f4bdaca0..aed180d8e585 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -336,6 +336,7 @@ wd Western Digital Corp.
336wetek WeTek Electronics, limited. 336wetek WeTek Electronics, limited.
337wexler Wexler 337wexler Wexler
338winbond Winbond Electronics corp. 338winbond Winbond Electronics corp.
339winstar Winstar Display Corp.
339wlf Wolfson Microelectronics 340wlf Wolfson Microelectronics
340wm Wondermedia Technologies, Inc. 341wm Wondermedia Technologies, Inc.
341x-powers X-Powers 342x-powers X-Powers
diff --git a/Documentation/filesystems/Locking b/Documentation/filesystems/Locking
index fdcfdd79682a..fe25787ff6d4 100644
--- a/Documentation/filesystems/Locking
+++ b/Documentation/filesystems/Locking
@@ -58,8 +58,7 @@ prototypes:
58 int (*permission) (struct inode *, int, unsigned int); 58 int (*permission) (struct inode *, int, unsigned int);
59 int (*get_acl)(struct inode *, int); 59 int (*get_acl)(struct inode *, int);
60 int (*setattr) (struct dentry *, struct iattr *); 60 int (*setattr) (struct dentry *, struct iattr *);
61 int (*getattr) (const struct path *, struct dentry *, struct kstat *, 61 int (*getattr) (const struct path *, struct kstat *, u32, unsigned int);
62 u32, unsigned int);
63 ssize_t (*listxattr) (struct dentry *, char *, size_t); 62 ssize_t (*listxattr) (struct dentry *, char *, size_t);
64 int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start, u64 len); 63 int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start, u64 len);
65 void (*update_time)(struct inode *, struct timespec *, int); 64 void (*update_time)(struct inode *, struct timespec *, int);
diff --git a/Documentation/filesystems/porting b/Documentation/filesystems/porting
index 95280079c0b3..5fb17f49f7a2 100644
--- a/Documentation/filesystems/porting
+++ b/Documentation/filesystems/porting
@@ -600,3 +600,9 @@ in your dentry operations instead.
600[recommended] 600[recommended]
601 ->readlink is optional for symlinks. Don't set, unless filesystem needs 601 ->readlink is optional for symlinks. Don't set, unless filesystem needs
602 to fake something for readlink(2). 602 to fake something for readlink(2).
603--
604[mandatory]
605 ->getattr() is now passed a struct path rather than a vfsmount and
606 dentry separately, and it now has request_mask and query_flags arguments
607 to specify the fields and sync type requested by statx. Filesystems not
608 supporting any statx-specific features may ignore the new arguments.
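	A minimal sketch of the updated hook, assuming a hypothetical
	filesystem "foo" with no statx-specific features (so the new
	arguments are simply ignored, as the note above permits):

		static int foo_getattr(const struct path *path, struct kstat *stat,
				       u32 request_mask, unsigned int query_flags)
		{
			struct inode *inode = d_inode(path->dentry);

			/* request_mask and query_flags are deliberately ignored */
			generic_fillattr(inode, stat);
			return 0;
		}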
diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt
index 569211703721..94dd27ef4a76 100644
--- a/Documentation/filesystems/vfs.txt
+++ b/Documentation/filesystems/vfs.txt
@@ -382,8 +382,7 @@ struct inode_operations {
382 int (*permission) (struct inode *, int); 382 int (*permission) (struct inode *, int);
383 int (*get_acl)(struct inode *, int); 383 int (*get_acl)(struct inode *, int);
384 int (*setattr) (struct dentry *, struct iattr *); 384 int (*setattr) (struct dentry *, struct iattr *);
385 int (*getattr) (const struct path *, struct dentry *, struct kstat *, 385 int (*getattr) (const struct path *, struct kstat *, u32, unsigned int);
386 u32, unsigned int);
387 ssize_t (*listxattr) (struct dentry *, char *, size_t); 386 ssize_t (*listxattr) (struct dentry *, char *, size_t);
388 void (*update_time)(struct inode *, struct timespec *, int); 387 void (*update_time)(struct inode *, struct timespec *, int);
389 int (*atomic_open)(struct inode *, struct dentry *, struct file *, 388 int (*atomic_open)(struct inode *, struct dentry *, struct file *,
diff --git a/Documentation/gpu/bridge/dw-hdmi.rst b/Documentation/gpu/bridge/dw-hdmi.rst
new file mode 100644
index 000000000000..486faadf00af
--- /dev/null
+++ b/Documentation/gpu/bridge/dw-hdmi.rst
@@ -0,0 +1,15 @@
1=======================================================
2 drm/bridge/dw-hdmi Synopsys DesignWare HDMI Controller
3=======================================================
4
5Synopsys DesignWare HDMI Controller
6===================================
7
8This section covers everything related to the Synopsys DesignWare HDMI
9Controller implemented as a DRM bridge.
10
11Supported Input Formats and Encodings
12-------------------------------------
13
14.. kernel-doc:: include/drm/bridge/dw_hdmi.h
15 :doc: Supported input formats and encodings
diff --git a/Documentation/gpu/drm-internals.rst b/Documentation/gpu/drm-internals.rst
index a09c721f9e89..babfb6143bd9 100644
--- a/Documentation/gpu/drm-internals.rst
+++ b/Documentation/gpu/drm-internals.rst
@@ -255,56 +255,6 @@ File Operations
255.. kernel-doc:: drivers/gpu/drm/drm_file.c 255.. kernel-doc:: drivers/gpu/drm/drm_file.c
256 :export: 256 :export:
257 257
258IOCTLs
259------
260
261struct drm_ioctl_desc \*ioctls; int num_ioctls;
262 Driver-specific ioctls descriptors table.
263
264Driver-specific ioctls numbers start at DRM_COMMAND_BASE. The ioctls
265descriptors table is indexed by the ioctl number offset from the base
266value. Drivers can use the DRM_IOCTL_DEF_DRV() macro to initialize
267the table entries.
268
269::
270
271 DRM_IOCTL_DEF_DRV(ioctl, func, flags)
272
273``ioctl`` is the ioctl name. Drivers must define the DRM_##ioctl and
274DRM_IOCTL_##ioctl macros to the ioctl number offset from
275DRM_COMMAND_BASE and the ioctl number respectively. The first macro is
276private to the device while the second must be exposed to userspace in a
277public header.
278
279``func`` is a pointer to the ioctl handler function compatible with the
280``drm_ioctl_t`` type.
281
282::
283
284 typedef int drm_ioctl_t(struct drm_device *dev, void *data,
285 struct drm_file *file_priv);
286
287``flags`` is a bitmask combination of the following values. It restricts
288how the ioctl is allowed to be called.
289
290- DRM_AUTH - Only authenticated callers allowed
291
292- DRM_MASTER - The ioctl can only be called on the master file handle
293
294- DRM_ROOT_ONLY - Only callers with the SYSADMIN capability allowed
295
296- DRM_CONTROL_ALLOW - The ioctl can only be called on a control
297 device
298
299- DRM_UNLOCKED - The ioctl handler will be called without locking the
300 DRM global mutex. This is the enforced default for kms drivers (i.e.
301 using the DRIVER_MODESET flag) and hence shouldn't be used any more
302 for new drivers.
303
304.. kernel-doc:: drivers/gpu/drm/drm_ioctl.c
305 :export:
306
307
308Misc Utilities 258Misc Utilities
309============== 259==============
310 260
diff --git a/Documentation/gpu/drm-uapi.rst b/Documentation/gpu/drm-uapi.rst
index 76356c86e358..858457567d3d 100644
--- a/Documentation/gpu/drm-uapi.rst
+++ b/Documentation/gpu/drm-uapi.rst
@@ -160,6 +160,20 @@ other hand, a driver requires shared state between clients which is
160visible to user-space and accessible beyond open-file boundaries, they 160visible to user-space and accessible beyond open-file boundaries, they
161cannot support render nodes. 161cannot support render nodes.
162 162
163IOCTL Support on Device Nodes
164=============================
165
166.. kernel-doc:: drivers/gpu/drm/drm_ioctl.c
167 :doc: driver specific ioctls
168
169.. kernel-doc:: include/drm/drm_ioctl.h
170 :internal:
171
172.. kernel-doc:: drivers/gpu/drm/drm_ioctl.c
173 :export:
174
175.. kernel-doc:: drivers/gpu/drm/drm_ioc32.c
176 :export:
163 177
164Testing and validation 178Testing and validation
165====================== 179======================
@@ -219,6 +233,16 @@ Debugfs Support
219.. kernel-doc:: drivers/gpu/drm/drm_debugfs.c 233.. kernel-doc:: drivers/gpu/drm/drm_debugfs.c
220 :export: 234 :export:
221 235
236Sysfs Support
237=============
238
239.. kernel-doc:: drivers/gpu/drm/drm_sysfs.c
240 :doc: overview
241
242.. kernel-doc:: drivers/gpu/drm/drm_sysfs.c
243 :export:
244
245
222VBlank event handling 246VBlank event handling
223===================== 247=====================
224 248
diff --git a/Documentation/gpu/index.rst b/Documentation/gpu/index.rst
index e998ee0d0dd5..c572f092739e 100644
--- a/Documentation/gpu/index.rst
+++ b/Documentation/gpu/index.rst
@@ -11,10 +11,12 @@ Linux GPU Driver Developer's Guide
11 drm-kms-helpers 11 drm-kms-helpers
12 drm-uapi 12 drm-uapi
13 i915 13 i915
14 meson
14 tinydrm 15 tinydrm
15 vc4 16 vc4
16 vga-switcheroo 17 vga-switcheroo
17 vgaarbiter 18 vgaarbiter
19 bridge/dw-hdmi
18 todo 20 todo
19 21
20.. only:: subproject and html 22.. only:: subproject and html
diff --git a/Documentation/gpu/meson.rst b/Documentation/gpu/meson.rst
new file mode 100644
index 000000000000..479f6f51a13b
--- /dev/null
+++ b/Documentation/gpu/meson.rst
@@ -0,0 +1,61 @@
1=============================================
2drm/meson AmLogic Meson Video Processing Unit
3=============================================
4
5.. kernel-doc:: drivers/gpu/drm/meson/meson_drv.c
6 :doc: Video Processing Unit
7
8Video Processing Unit
9=====================
10
11The Amlogic Meson Display controller is composed of several components
12that are documented below:
13
14.. code::
15
16 DMC|---------------VPU (Video Processing Unit)----------------|------HHI------|
17 | vd1 _______ _____________ _________________ | |
18 D |-------| |----| | | | | HDMI PLL |
19 D | vd2 | VIU | | Video Post | | Video Encoders |<---|-----VCLK |
20 R |-------| |----| Processing | | | | |
21 | osd2 | | | |---| Enci ----------|----|-----VDAC------|
22 R |-------| CSC |----| Scalers | | Encp ----------|----|----HDMI-TX----|
23 A | osd1 | | | Blenders | | Encl ----------|----|---------------|
24 M |-------|______|----|____________| |________________| | |
25 ___|__________________________________________________________|_______________|
26
27Video Input Unit
28================
29
30.. kernel-doc:: drivers/gpu/drm/meson/meson_viu.c
31 :doc: Video Input Unit
32
33Video Post Processing
34=====================
35
36.. kernel-doc:: drivers/gpu/drm/meson/meson_vpp.c
37 :doc: Video Post Processing
38
39Video Encoder
40=============
41
42.. kernel-doc:: drivers/gpu/drm/meson/meson_venc.c
43 :doc: Video Encoder
44
45Video Canvas Management
46=======================
47
48.. kernel-doc:: drivers/gpu/drm/meson/meson_canvas.c
49 :doc: Canvas
50
51Video Clocks
52============
53
54.. kernel-doc:: drivers/gpu/drm/meson/meson_vclk.c
55 :doc: Video Clocks
56
57HDMI Video Output
58=================
59
60.. kernel-doc:: drivers/gpu/drm/meson/meson_dw_hdmi.c
61 :doc: HDMI Output
diff --git a/Documentation/gpu/todo.rst b/Documentation/gpu/todo.rst
index e255b36b34a3..1bdb7356a310 100644
--- a/Documentation/gpu/todo.rst
+++ b/Documentation/gpu/todo.rst
@@ -16,7 +16,7 @@ De-midlayer drivers
16With the recent ``drm_bus`` cleanup patches for 3.17 it is no longer required 16With the recent ``drm_bus`` cleanup patches for 3.17 it is no longer required
17to have a ``drm_bus`` structure set up. Drivers can directly set up the 17to have a ``drm_bus`` structure set up. Drivers can directly set up the
18``drm_device`` structure instead of relying on bus methods in ``drm_usb.c`` 18``drm_device`` structure instead of relying on bus methods in ``drm_usb.c``
19and ``drm_platform.c``. The goal is to get rid of the driver's ``->load`` / 19and ``drm_pci.c``. The goal is to get rid of the driver's ``->load`` /
20``->unload`` callbacks and open-code the load/unload sequence properly, using 20``->unload`` callbacks and open-code the load/unload sequence properly, using
21the new two-stage ``drm_device`` setup/teardown. 21the new two-stage ``drm_device`` setup/teardown.
22 22
@@ -175,7 +175,7 @@ fine-grained per-buffer object and per-context lockings scheme. Currently the
175following drivers still use ``struct_mutex``: ``msm``, ``omapdrm`` and 175following drivers still use ``struct_mutex``: ``msm``, ``omapdrm`` and
176``udl``. 176``udl``.
177 177
178Contact: Daniel Vetter 178Contact: Daniel Vetter, respective driver maintainers
179 179
180Switch to drm_connector_list_iter for any connector_list walking 180Switch to drm_connector_list_iter for any connector_list walking
181---------------------------------------------------------------- 181----------------------------------------------------------------
@@ -217,6 +217,8 @@ plan is to switch to per-file driver API headers, which will also structure
217the kerneldoc better. This should also allow more fine-grained ``#include`` 217the kerneldoc better. This should also allow more fine-grained ``#include``
218directives. 218directives.
219 219
220In the end no .c file should need to include ``drmP.h`` anymore.
221
220Contact: Daniel Vetter 222Contact: Daniel Vetter
221 223
222Add missing kerneldoc for exported functions 224Add missing kerneldoc for exported functions
@@ -244,13 +246,8 @@ be hidden so that driver writers don't accidentally end up using it. And to
244prevent security issues in those legacy IOCTLs from being exploited on modern 246prevent security issues in those legacy IOCTLs from being exploited on modern
245drivers. This has multiple possible subtasks: 247drivers. This has multiple possible subtasks:
246 248
247* Make sure legacy IOCTLs can't be used on modern drivers.
248* Extract support code for legacy features into a ``drm-legacy.ko`` kernel 249* Extract support code for legacy features into a ``drm-legacy.ko`` kernel
249 module and compile it only when one of the legacy drivers is enabled. 250 module and compile it only when one of the legacy drivers is enabled.
250* Extract legacy functions into their own headers and remove it that from the
251 monolithic ``drmP.h`` header.
252* Remove any lingering cruft from the OS abstraction layer from modern
253 drivers.
254 251
255This is mostly done, the only thing left is to split up ``drm_irq.c`` into 252This is mostly done, the only thing left is to split up ``drm_irq.c`` into
256legacy cruft and the parts needed by modern KMS drivers. 253legacy cruft and the parts needed by modern KMS drivers.
@@ -408,23 +405,3 @@ Contact: Noralf Trønnes, Daniel Vetter
408 405
409Outside DRM 406Outside DRM
410=========== 407===========
411
412Better kerneldoc
413----------------
414
415This is pretty much done, but there's some advanced topics:
416
417Come up with a way to hyperlink to struct members. Currently you can hyperlink
418to the struct using ``#struct_name``, but not to a member within. Would need
419buy-in from kerneldoc maintainers, and the big question is how to make it work
420without totally unsightly
421``drm_foo_bar_really_long_structure->even_longer_memeber`` all over the text
422which breaks text flow.
423
424Figure out how to integrate the asciidoc support for ascii-diagrams. We have a
425few of those (e.g. to describe mode timings), and asciidoc supports converting
426some ascii-art dialect into pngs. Would be really pretty to make that work.
427
428Contact: Daniel Vetter, Jani Nikula
429
430Jani is working on this already, hopefully lands in 4.8.
diff --git a/Documentation/media/uapi/v4l/subdev-formats.rst b/Documentation/media/uapi/v4l/subdev-formats.rst
index d6152c907b8b..4032d97e022d 100644
--- a/Documentation/media/uapi/v4l/subdev-formats.rst
+++ b/Documentation/media/uapi/v4l/subdev-formats.rst
@@ -1258,6 +1258,319 @@ The following tables list existing packed RGB formats.
1258 - b\ :sub:`2` 1258 - b\ :sub:`2`
1259 - b\ :sub:`1` 1259 - b\ :sub:`1`
1260 - b\ :sub:`0` 1260 - b\ :sub:`0`
1261 * .. _MEDIA-BUS-FMT-RGB101010-1X30:
1262
1263 - MEDIA_BUS_FMT_RGB101010_1X30
1264 - 0x1018
1265 -
1266 - 0
1267 - 0
1268 - r\ :sub:`9`
1269 - r\ :sub:`8`
1270 - r\ :sub:`7`
1271 - r\ :sub:`6`
1272 - r\ :sub:`5`
1273 - r\ :sub:`4`
1274 - r\ :sub:`3`
1275 - r\ :sub:`2`
1276 - r\ :sub:`1`
1277 - r\ :sub:`0`
1278 - g\ :sub:`9`
1279 - g\ :sub:`8`
1280 - g\ :sub:`7`
1281 - g\ :sub:`6`
1282 - g\ :sub:`5`
1283 - g\ :sub:`4`
1284 - g\ :sub:`3`
1285 - g\ :sub:`2`
1286 - g\ :sub:`1`
1287 - g\ :sub:`0`
1288 - b\ :sub:`9`
1289 - b\ :sub:`8`
1290 - b\ :sub:`7`
1291 - b\ :sub:`6`
1292 - b\ :sub:`5`
1293 - b\ :sub:`4`
1294 - b\ :sub:`3`
1295 - b\ :sub:`2`
1296 - b\ :sub:`1`
1297 - b\ :sub:`0`
1298
1299.. raw:: latex
1300
1301 \endgroup
1302
1303
1304The following table lists existing packed 36bit wide RGB formats.
1305
1306.. tabularcolumns:: |p{4.0cm}|p{0.7cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|
1307
1308.. _v4l2-mbus-pixelcode-rgb-36:
1309
1310.. raw:: latex
1311
1312 \begingroup
1313 \tiny
1314 \setlength{\tabcolsep}{2pt}
1315
1316.. flat-table:: 36bit RGB formats
1317 :header-rows: 2
1318 :stub-columns: 0
1319 :widths: 36 7 3 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2
1320
1321 * - Identifier
1322 - Code
1323 -
1324 - :cspan:`35` Data organization
1325 * -
1326 -
1327 - Bit
1328 - 35
1329 - 34
1330 - 33
1331 - 32
1332 - 31
1333 - 30
1334 - 29
1335 - 28
1336 - 27
1337 - 26
1338 - 25
1339 - 24
1340 - 23
1341 - 22
1342 - 21
1343 - 20
1344 - 19
1345 - 18
1346 - 17
1347 - 16
1348 - 15
1349 - 14
1350 - 13
1351 - 12
1352 - 11
1353 - 10
1354 - 9
1355 - 8
1356 - 7
1357 - 6
1358 - 5
1359 - 4
1360 - 3
1361 - 2
1362 - 1
1363 - 0
1364 * .. _MEDIA-BUS-FMT-RGB121212-1X36:
1365
1366 - MEDIA_BUS_FMT_RGB121212_1X36
1367 - 0x1019
1368 -
1369 - r\ :sub:`11`
1370 - r\ :sub:`10`
1371 - r\ :sub:`9`
1372 - r\ :sub:`8`
1373 - r\ :sub:`7`
1374 - r\ :sub:`6`
1375 - r\ :sub:`5`
1376 - r\ :sub:`4`
1377 - r\ :sub:`3`
1378 - r\ :sub:`2`
1379 - r\ :sub:`1`
1380 - r\ :sub:`0`
1381 - g\ :sub:`11`
1382 - g\ :sub:`10`
1383 - g\ :sub:`9`
1384 - g\ :sub:`8`
1385 - g\ :sub:`7`
1386 - g\ :sub:`6`
1387 - g\ :sub:`5`
1388 - g\ :sub:`4`
1389 - g\ :sub:`3`
1390 - g\ :sub:`2`
1391 - g\ :sub:`1`
1392 - g\ :sub:`0`
1393 - b\ :sub:`11`
1394 - b\ :sub:`10`
1395 - b\ :sub:`9`
1396 - b\ :sub:`8`
1397 - b\ :sub:`7`
1398 - b\ :sub:`6`
1399 - b\ :sub:`5`
1400 - b\ :sub:`4`
1401 - b\ :sub:`3`
1402 - b\ :sub:`2`
1403 - b\ :sub:`1`
1404 - b\ :sub:`0`
1405
1406.. raw:: latex
1407
1408 \endgroup
1409
1410
1411The following table lists existing packed 48bit wide RGB formats.
1412
1413.. tabularcolumns:: |p{4.0cm}|p{0.7cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|
1414
1415.. _v4l2-mbus-pixelcode-rgb-48:
1416
1417.. raw:: latex
1418
1419 \begingroup
1420 \tiny
1421 \setlength{\tabcolsep}{2pt}
1422
1423.. flat-table:: 48bit RGB formats
1424 :header-rows: 3
1425 :stub-columns: 0
1426 :widths: 36 7 3 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2
1427
1428 * - Identifier
1429 - Code
1430 -
1431 - :cspan:`31` Data organization
1432 * -
1433 -
1434 - Bit
1435 -
1436 -
1437 -
1438 -
1439 -
1440 -
1441 -
1442 -
1443 -
1444 -
1445 -
1446 -
1447 -
1448 -
1449 -
1450 -
1451 - 47
1452 - 46
1453 - 45
1454 - 44
1455 - 43
1456 - 42
1457 - 41
1458 - 40
1459 - 39
1460 - 38
1461 - 37
1462 - 36
1463 - 35
1464 - 34
1465 - 33
1466 - 32
1467 * -
1468 -
1469 -
1470 - 31
1471 - 30
1472 - 29
1473 - 28
1474 - 27
1475 - 26
1476 - 25
1477 - 24
1478 - 23
1479 - 22
1480 - 21
1481 - 20
1482 - 19
1483 - 18
1484 - 17
1485 - 16
1486 - 15
1487 - 14
1488 - 13
1489 - 12
1490 - 11
1491 - 10
1492 - 9
1493 - 8
1494 - 7
1495 - 6
1496 - 5
1497 - 4
1498 - 3
1499 - 2
1500 - 1
1501 - 0
1502 * .. _MEDIA-BUS-FMT-RGB161616-1X48:
1503
1504 - MEDIA_BUS_FMT_RGB161616_1X48
1505 - 0x101a
1506 -
1507 -
1508 -
1509 -
1510 -
1511 -
1512 -
1513 -
1514 -
1515 -
1516 -
1517 -
1518 -
1519 -
1520 -
1521 -
1522 -
1523 - r\ :sub:`15`
1524 - r\ :sub:`14`
1525 - r\ :sub:`13`
1526 - r\ :sub:`12`
1527 - r\ :sub:`11`
1528 - r\ :sub:`10`
1529 - r\ :sub:`9`
1530 - r\ :sub:`8`
1531 - r\ :sub:`7`
1532 - r\ :sub:`6`
1533 - r\ :sub:`5`
1534 - r\ :sub:`4`
1535 - r\ :sub:`3`
1536 - r\ :sub:`2`
1537 - r\ :sub:`1`
1538 - r\ :sub:`0`
1539 * -
1540 -
1541 -
1542 - g\ :sub:`15`
1543 - g\ :sub:`14`
1544 - g\ :sub:`13`
1545 - g\ :sub:`12`
1546 - g\ :sub:`11`
1547 - g\ :sub:`10`
1548 - g\ :sub:`9`
1549 - g\ :sub:`8`
1550 - g\ :sub:`7`
1551 - g\ :sub:`6`
1552 - g\ :sub:`5`
1553 - g\ :sub:`4`
1554 - g\ :sub:`3`
1555 - g\ :sub:`2`
1556 - g\ :sub:`1`
1557 - g\ :sub:`0`
1558 - b\ :sub:`15`
1559 - b\ :sub:`14`
1560 - b\ :sub:`13`
1561 - b\ :sub:`12`
1562 - b\ :sub:`11`
1563 - b\ :sub:`10`
1564 - b\ :sub:`9`
1565 - b\ :sub:`8`
1566 - b\ :sub:`7`
1567 - b\ :sub:`6`
1568 - b\ :sub:`5`
1569 - b\ :sub:`4`
1570 - b\ :sub:`3`
1571 - b\ :sub:`2`
1572 - b\ :sub:`1`
1573 - b\ :sub:`0`
1261 1574
1262.. raw:: latex 1575.. raw:: latex
1263 1576
@@ -2344,7 +2657,8 @@ The format code is made of the following information.
2344 2657
2345- The number of bus samples per pixel. Pixels that are wider than the 2658- The number of bus samples per pixel. Pixels that are wider than the
2346 bus width must be transferred in multiple samples. Common values are 2659 bus width must be transferred in multiple samples. Common values are
2347 1, 1.5 (encoded as 1_5) and 2. 2660 0.5 (encoded as 0_5; in this case two pixels are transferred per bus
2661 sample), 1, 1.5 (encoded as 1_5) and 2.
2348 2662
2349- The bus width. When the bus width is larger than the number of bits 2663- The bus width. When the bus width is larger than the number of bits
2350 per pixel component, several components are packed in a single bus 2664 per pixel component, several components are packed in a single bus
@@ -5962,6 +6276,78 @@ the following codes.
5962 - v\ :sub:`2` 6276 - v\ :sub:`2`
5963 - v\ :sub:`1` 6277 - v\ :sub:`1`
5964 - v\ :sub:`0` 6278 - v\ :sub:`0`
6279 * .. _MEDIA-BUS-FMT-UYYVYY8-0-5X24:
6280
6281 - MEDIA_BUS_FMT_UYYVYY8_0_5X24
6282 - 0x2026
6283 -
6284 -
6285 -
6286 -
6287 -
6288 -
6289 -
6290 -
6291 -
6292 - u\ :sub:`7`
6293 - u\ :sub:`6`
6294 - u\ :sub:`5`
6295 - u\ :sub:`4`
6296 - u\ :sub:`3`
6297 - u\ :sub:`2`
6298 - u\ :sub:`1`
6299 - u\ :sub:`0`
6300 - y\ :sub:`7`
6301 - y\ :sub:`6`
6302 - y\ :sub:`5`
6303 - y\ :sub:`4`
6304 - y\ :sub:`3`
6305 - y\ :sub:`2`
6306 - y\ :sub:`1`
6307 - y\ :sub:`0`
6308 - y\ :sub:`7`
6309 - y\ :sub:`6`
6310 - y\ :sub:`5`
6311 - y\ :sub:`4`
6312 - y\ :sub:`3`
6313 - y\ :sub:`2`
6314 - y\ :sub:`1`
6315 - y\ :sub:`0`
6316 * -
6317 -
6318 -
6319 -
6320 -
6321 -
6322 -
6323 -
6324 -
6325 -
6326 -
6327 - v\ :sub:`7`
6328 - v\ :sub:`6`
6329 - v\ :sub:`5`
6330 - v\ :sub:`4`
6331 - v\ :sub:`3`
6332 - v\ :sub:`2`
6333 - v\ :sub:`1`
6334 - v\ :sub:`0`
6335 - y\ :sub:`7`
6336 - y\ :sub:`6`
6337 - y\ :sub:`5`
6338 - y\ :sub:`4`
6339 - y\ :sub:`3`
6340 - y\ :sub:`2`
6341 - y\ :sub:`1`
6342 - y\ :sub:`0`
6343 - y\ :sub:`7`
6344 - y\ :sub:`6`
6345 - y\ :sub:`5`
6346 - y\ :sub:`4`
6347 - y\ :sub:`3`
6348 - y\ :sub:`2`
6349 - y\ :sub:`1`
6350 - y\ :sub:`0`
5965 * .. _MEDIA-BUS-FMT-UYVY12-1X24: 6351 * .. _MEDIA-BUS-FMT-UYVY12-1X24:
5966 6352
5967 - MEDIA_BUS_FMT_UYVY12_1X24 6353 - MEDIA_BUS_FMT_UYVY12_1X24
@@ -6287,6 +6673,78 @@ the following codes.
6287 - v\ :sub:`2` 6673 - v\ :sub:`2`
6288 - v\ :sub:`1` 6674 - v\ :sub:`1`
6289 - v\ :sub:`0` 6675 - v\ :sub:`0`
6676 * .. _MEDIA-BUS-FMT-UYYVYY10-0-5X30:
6677
6678 - MEDIA_BUS_FMT_UYYVYY10_0_5X30
6679 - 0x2027
6680 -
6681 -
6682 -
6683 - u\ :sub:`9`
6684 - u\ :sub:`8`
6685 - u\ :sub:`7`
6686 - u\ :sub:`6`
6687 - u\ :sub:`5`
6688 - u\ :sub:`4`
6689 - u\ :sub:`3`
6690 - u\ :sub:`2`
6691 - u\ :sub:`1`
6692 - u\ :sub:`0`
6693 - y\ :sub:`9`
6694 - y\ :sub:`8`
6695 - y\ :sub:`7`
6696 - y\ :sub:`6`
6697 - y\ :sub:`5`
6698 - y\ :sub:`4`
6699 - y\ :sub:`3`
6700 - y\ :sub:`2`
6701 - y\ :sub:`1`
6702 - y\ :sub:`0`
6703 - y\ :sub:`9`
6704 - y\ :sub:`8`
6705 - y\ :sub:`7`
6706 - y\ :sub:`6`
6707 - y\ :sub:`5`
6708 - y\ :sub:`4`
6709 - y\ :sub:`3`
6710 - y\ :sub:`2`
6711 - y\ :sub:`1`
6712 - y\ :sub:`0`
6713 * -
6714 -
6715 -
6716 -
6717 -
6718 - v\ :sub:`9`
6719 - v\ :sub:`8`
6720 - v\ :sub:`7`
6721 - v\ :sub:`6`
6722 - v\ :sub:`5`
6723 - v\ :sub:`4`
6724 - v\ :sub:`3`
6725 - v\ :sub:`2`
6726 - v\ :sub:`1`
6727 - v\ :sub:`0`
6728 - y\ :sub:`9`
6729 - y\ :sub:`8`
6730 - y\ :sub:`7`
6731 - y\ :sub:`6`
6732 - y\ :sub:`5`
6733 - y\ :sub:`4`
6734 - y\ :sub:`3`
6735 - y\ :sub:`2`
6736 - y\ :sub:`1`
6737 - y\ :sub:`0`
6738 - y\ :sub:`9`
6739 - y\ :sub:`8`
6740 - y\ :sub:`7`
6741 - y\ :sub:`6`
6742 - y\ :sub:`5`
6743 - y\ :sub:`4`
6744 - y\ :sub:`3`
6745 - y\ :sub:`2`
6746 - y\ :sub:`1`
6747 - y\ :sub:`0`
6290 * .. _MEDIA-BUS-FMT-AYUV8-1X32: 6748 * .. _MEDIA-BUS-FMT-AYUV8-1X32:
6291 6749
6292 - MEDIA_BUS_FMT_AYUV8_1X32 6750 - MEDIA_BUS_FMT_AYUV8_1X32
@@ -6330,6 +6788,506 @@ the following codes.
6330 6788
6331 \endgroup 6789 \endgroup
6332 6790
6791
6792The following table lists existing packed 36bit wide YUV formats.
6793
6794.. raw:: latex
6795
6796 \begingroup
6797 \tiny
6798 \setlength{\tabcolsep}{2pt}
6799
6800.. tabularcolumns:: |p{4.0cm}|p{0.7cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|
6801
6802.. _v4l2-mbus-pixelcode-yuv8-36bit:
6803
6804.. flat-table:: 36bit YUV Formats
6805 :header-rows: 2
6806 :stub-columns: 0
6807 :widths: 36 7 3 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2
6808
6809 * - Identifier
6810 - Code
6811 -
6812 - :cspan:`35` Data organization
6813 * -
6814 -
6815 - Bit
6816 - 35
6817 - 34
6818 - 33
6819 - 32
6820 - 31
6821 - 30
6822 - 29
6823 - 28
6824 - 27
6825 - 26
6826 - 25
6827 - 24
6828 - 23
6829 - 22
6830 - 21
6831 - 20
6832 - 19
6833 - 18
6834 - 17
6835 - 16
6836 - 15
6837 - 14
6838 - 13
6839 - 12
6840 - 11
6841 - 10
6842 - 9
6843 - 8
6844 - 7
6845 - 6
6846 - 5
6847 - 4
6848 - 3
6849 - 2
6850 - 1
6851 - 0
6852 * .. _MEDIA-BUS-FMT-UYYVYY12-0-5X36:
6853
6854 - MEDIA_BUS_FMT_UYYVYY12_0_5X36
6855 - 0x2028
6856 -
6857 - u\ :sub:`11`
6858 - u\ :sub:`10`
6859 - u\ :sub:`9`
6860 - u\ :sub:`8`
6861 - u\ :sub:`7`
6862 - u\ :sub:`6`
6863 - u\ :sub:`5`
6864 - u\ :sub:`4`
6865 - u\ :sub:`3`
6866 - u\ :sub:`2`
6867 - u\ :sub:`1`
6868 - u\ :sub:`0`
6869 - y\ :sub:`11`
6870 - y\ :sub:`10`
6871 - y\ :sub:`9`
6872 - y\ :sub:`8`
6873 - y\ :sub:`7`
6874 - y\ :sub:`6`
6875 - y\ :sub:`5`
6876 - y\ :sub:`4`
6877 - y\ :sub:`3`
6878 - y\ :sub:`2`
6879 - y\ :sub:`1`
6880 - y\ :sub:`0`
6881 - y\ :sub:`11`
6882 - y\ :sub:`10`
6883 - y\ :sub:`9`
6884 - y\ :sub:`8`
6885 - y\ :sub:`7`
6886 - y\ :sub:`6`
6887 - y\ :sub:`5`
6888 - y\ :sub:`4`
6889 - y\ :sub:`3`
6890 - y\ :sub:`2`
6891 - y\ :sub:`1`
6892 - y\ :sub:`0`
6893 * -
6894 -
6895 -
6896 - v\ :sub:`11`
6897 - v\ :sub:`10`
6898 - v\ :sub:`9`
6899 - v\ :sub:`8`
6900 - v\ :sub:`7`
6901 - v\ :sub:`6`
6902 - v\ :sub:`5`
6903 - v\ :sub:`4`
6904 - v\ :sub:`3`
6905 - v\ :sub:`2`
6906 - v\ :sub:`1`
6907 - v\ :sub:`0`
6908 - y\ :sub:`11`
6909 - y\ :sub:`10`
6910 - y\ :sub:`9`
6911 - y\ :sub:`8`
6912 - y\ :sub:`7`
6913 - y\ :sub:`6`
6914 - y\ :sub:`5`
6915 - y\ :sub:`4`
6916 - y\ :sub:`3`
6917 - y\ :sub:`2`
6918 - y\ :sub:`1`
6919 - y\ :sub:`0`
6920 - y\ :sub:`11`
6921 - y\ :sub:`10`
6922 - y\ :sub:`9`
6923 - y\ :sub:`8`
6924 - y\ :sub:`7`
6925 - y\ :sub:`6`
6926 - y\ :sub:`5`
6927 - y\ :sub:`4`
6928 - y\ :sub:`3`
6929 - y\ :sub:`2`
6930 - y\ :sub:`1`
6931 - y\ :sub:`0`
6932 * .. _MEDIA-BUS-FMT-YUV12-1X36:
6933
6934 - MEDIA_BUS_FMT_YUV12_1X36
6935 - 0x2029
6936 -
6937 - y\ :sub:`11`
6938 - y\ :sub:`10`
6939 - y\ :sub:`9`
6940 - y\ :sub:`8`
6941 - y\ :sub:`7`
6942 - y\ :sub:`6`
6943 - y\ :sub:`5`
6944 - y\ :sub:`4`
6945 - y\ :sub:`3`
6946 - y\ :sub:`2`
6947 - y\ :sub:`1`
6948 - y\ :sub:`0`
6949 - u\ :sub:`11`
6950 - u\ :sub:`10`
6951 - u\ :sub:`9`
6952 - u\ :sub:`8`
6953 - u\ :sub:`7`
6954 - u\ :sub:`6`
6955 - u\ :sub:`5`
6956 - u\ :sub:`4`
6957 - u\ :sub:`3`
6958 - u\ :sub:`2`
6959 - u\ :sub:`1`
6960 - u\ :sub:`0`
6961 - v\ :sub:`11`
6962 - v\ :sub:`10`
6963 - v\ :sub:`9`
6964 - v\ :sub:`8`
6965 - v\ :sub:`7`
6966 - v\ :sub:`6`
6967 - v\ :sub:`5`
6968 - v\ :sub:`4`
6969 - v\ :sub:`3`
6970 - v\ :sub:`2`
6971 - v\ :sub:`1`
6972 - v\ :sub:`0`
6973
6974
6975.. raw:: latex
6976
6977 \endgroup
6978
6979
6980The following table lists existing packed 48bit wide YUV formats.
6981
6982.. raw:: latex
6983
6984 \begingroup
6985 \tiny
6986 \setlength{\tabcolsep}{2pt}
6987
6988.. tabularcolumns:: |p{4.0cm}|p{0.7cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|
6989
6990.. _v4l2-mbus-pixelcode-yuv8-48bit:
6991
6992.. flat-table:: 48bit YUV Formats
6993 :header-rows: 3
6994 :stub-columns: 0
6995 :widths: 36 7 3 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2
6996
6997 * - Identifier
6998 - Code
6999 -
7000 - :cspan:`31` Data organization
7001 * -
7002 -
7003 - Bit
7004 -
7005 -
7006 -
7007 -
7008 -
7009 -
7010 -
7011 -
7012 -
7013 -
7014 -
7015 -
7016 -
7017 -
7018 -
7019 -
7020 - 47
7021 - 46
7022 - 45
7023 - 44
7024 - 43
7025 - 42
7026 - 41
7027 - 40
7028 - 39
7029 - 38
7030 - 37
7031 - 36
7032 - 35
7033 - 34
7034 - 33
7035 - 32
7036 * -
7037 -
7038 -
7039 - 31
7040 - 30
7041 - 29
7042 - 28
7043 - 27
7044 - 26
7045 - 25
7046 - 24
7047 - 23
7048 - 22
7049 - 21
7050 - 20
7051 - 19
7052 - 18
7053 - 17
7054 - 16
7055 - 15
7056 - 14
7057 - 13
7058 - 12
7059 - 11
7060 - 10
7061 - 9
7062 - 8
7063 - 7
7064 - 6
7065 - 5
7066 - 4
7067 - 3
7068 - 2
7069 - 1
7070 - 0
7071 * .. _MEDIA-BUS-FMT-YUV16-1X48:
7072
7073 - MEDIA_BUS_FMT_YUV16_1X48
7074 - 0x202a
7075 -
7076 -
7077 -
7078 -
7079 -
7080 -
7081 -
7082 -
7083 -
7084 -
7085 -
7086 -
7087 -
7088 -
7089 -
7090 -
7091 -
7092 - y\ :sub:`15`
7093 - y\ :sub:`14`
7094 - y\ :sub:`13`
7095 - y\ :sub:`12`
7096 - y\ :sub:`11`
7097 - y\ :sub:`10`
7098 - y\ :sub:`9`
7099 - y\ :sub:`8`
7100 - y\ :sub:`7`
7101 - y\ :sub:`6`
7102 - y\ :sub:`5`
7103 - y\ :sub:`4`
7104 - y\ :sub:`3`
7105 - y\ :sub:`2`
7106 - y\ :sub:`1`
7107 - y\ :sub:`0`
7108 * -
7109 -
7110 -
7111 - u\ :sub:`15`
7112 - u\ :sub:`14`
7113 - u\ :sub:`13`
7114 - u\ :sub:`12`
7115 - u\ :sub:`11`
7116 - u\ :sub:`10`
7117 - u\ :sub:`9`
7118 - u\ :sub:`8`
7119 - u\ :sub:`7`
7120 - u\ :sub:`6`
7121 - u\ :sub:`5`
7122 - u\ :sub:`4`
7123 - u\ :sub:`3`
7124 - u\ :sub:`2`
7125 - u\ :sub:`1`
7126 - u\ :sub:`0`
7127 - v\ :sub:`15`
7128 - v\ :sub:`14`
7129 - v\ :sub:`13`
7130 - v\ :sub:`12`
7131 - v\ :sub:`11`
7132 - v\ :sub:`10`
7133 - v\ :sub:`9`
7134 - v\ :sub:`8`
7135 - v\ :sub:`7`
7136 - v\ :sub:`6`
7137 - v\ :sub:`5`
7138 - v\ :sub:`4`
7139 - v\ :sub:`3`
7140 - v\ :sub:`2`
7141 - v\ :sub:`1`
7142 - v\ :sub:`0`
7143 * .. _MEDIA-BUS-FMT-UYYVYY16-0-5X48:
7144
7145 - MEDIA_BUS_FMT_UYYVYY16_0_5X48
7146 - 0x202b
7147 -
7148 -
7149 -
7150 -
7151 -
7152 -
7153 -
7154 -
7155 -
7156 -
7157 -
7158 -
7159 -
7160 -
7161 -
7162 -
7163 -
7164 - u\ :sub:`15`
7165 - u\ :sub:`14`
7166 - u\ :sub:`13`
7167 - u\ :sub:`12`
7168 - u\ :sub:`11`
7169 - u\ :sub:`10`
7170 - u\ :sub:`9`
7171 - u\ :sub:`8`
7172 - u\ :sub:`7`
7173 - u\ :sub:`6`
7174 - u\ :sub:`5`
7175 - u\ :sub:`4`
7176 - u\ :sub:`3`
7177 - u\ :sub:`2`
7178 - u\ :sub:`1`
7179 - u\ :sub:`0`
7180 * -
7181 -
7182 -
7183 - y\ :sub:`15`
7184 - y\ :sub:`14`
7185 - y\ :sub:`13`
7186 - y\ :sub:`12`
7187 - y\ :sub:`11`
7188 - y\ :sub:`10`
7189 - y\ :sub:`9`
7190 - y\ :sub:`8`
7191 - y\ :sub:`7`
7192 - y\ :sub:`6`
7193 - y\ :sub:`5`
7194 - y\ :sub:`4`
7195 - y\ :sub:`3`
7196 - y\ :sub:`2`
7197 - y\ :sub:`1`
7198 - y\ :sub:`0`
7199 - y\ :sub:`15`
7200 - y\ :sub:`14`
7201 - y\ :sub:`13`
7202 - y\ :sub:`12`
7203 - y\ :sub:`11`
7204 - y\ :sub:`10`
7205 - y\ :sub:`9`
7206 - y\ :sub:`8`
7207 - y\ :sub:`7`
7208 - y\ :sub:`6`
7209 - y\ :sub:`5`
7210 - y\ :sub:`4`
7211 - y\ :sub:`3`
7212 - y\ :sub:`2`
7213 - y\ :sub:`1`
7214 - y\ :sub:`0`
7215 * -
7216 -
7217 -
7218 -
7219 -
7220 -
7221 -
7222 -
7223 -
7224 -
7225 -
7226 -
7227 -
7228 -
7229 -
7230 -
7231 -
7232 -
7233 -
7234 - v\ :sub:`15`
7235 - v\ :sub:`14`
7236 - v\ :sub:`13`
7237 - v\ :sub:`12`
7238 - v\ :sub:`11`
7239 - v\ :sub:`10`
7240 - v\ :sub:`9`
7241 - v\ :sub:`8`
7242 - v\ :sub:`7`
7243 - v\ :sub:`6`
7244 - v\ :sub:`5`
7245 - v\ :sub:`4`
7246 - v\ :sub:`3`
7247 - v\ :sub:`2`
7248 - v\ :sub:`1`
7249 - v\ :sub:`0`
7250 * -
7251 -
7252 -
7253 - y\ :sub:`15`
7254 - y\ :sub:`14`
7255 - y\ :sub:`13`
7256 - y\ :sub:`12`
7257 - y\ :sub:`11`
7258 - y\ :sub:`10`
7259 - y\ :sub:`9`
7260 - y\ :sub:`8`
7261 - y\ :sub:`7`
7262 - y\ :sub:`6`
7263 - y\ :sub:`5`
7264 - y\ :sub:`4`
7265 - y\ :sub:`3`
7266 - y\ :sub:`2`
7267 - y\ :sub:`1`
7268 - y\ :sub:`0`
7269 - y\ :sub:`15`
7270 - y\ :sub:`14`
7271 - y\ :sub:`13`
7272 - y\ :sub:`12`
7273 - y\ :sub:`11`
7274 - y\ :sub:`10`
7275 - y\ :sub:`9`
7276 - y\ :sub:`8`
7277 - y\ :sub:`7`
7278 - y\ :sub:`6`
7279 - y\ :sub:`5`
7280 - y\ :sub:`4`
7281 - y\ :sub:`3`
7282 - y\ :sub:`2`
7283 - y\ :sub:`1`
7284 - y\ :sub:`0`
7285
7286
7287.. raw:: latex
7288
7289 \endgroup
7290
6333HSV/HSL Formats 7291HSV/HSL Formats
6334^^^^^^^^^^^^^^^ 7292^^^^^^^^^^^^^^^
6335 7293
diff --git a/Documentation/pinctrl.txt b/Documentation/pinctrl.txt
index 54bd5faa8782..f2af35f6d6b2 100644
--- a/Documentation/pinctrl.txt
+++ b/Documentation/pinctrl.txt
@@ -77,9 +77,15 @@ static struct pinctrl_desc foo_desc = {
77 77
78int __init foo_probe(void) 78int __init foo_probe(void)
79{ 79{
80 int error;
81
80 struct pinctrl_dev *pctl; 82 struct pinctrl_dev *pctl;
81 83
82 return pinctrl_register_and_init(&foo_desc, <PARENT>, NULL, &pctl); 84 error = pinctrl_register_and_init(&foo_desc, <PARENT>, NULL, &pctl);
85 if (error)
86 return error;
87
88 return pinctrl_enable(pctl);
83} 89}
84 90
85To enable the pinctrl subsystem and the subgroups for PINMUX and PINCONF and 91To enable the pinctrl subsystem and the subgroups for PINMUX and PINCONF and
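A sketch of a probe that does extra driver-specific setup between the two
calls (foo_hw_init() is a hypothetical helper; <PARENT> is the same
placeholder used above):

	int __init foo_probe(void)
	{
		int error;

		struct pinctrl_dev *pctl;

		error = pinctrl_register_and_init(&foo_desc, <PARENT>, NULL, &pctl);
		if (error)
			return error;

		/* hypothetical: hardware setup done before the pins go live */
		foo_hw_init();

		return pinctrl_enable(pctl);
	}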
diff --git a/Documentation/process/stable-kernel-rules.rst b/Documentation/process/stable-kernel-rules.rst
index 11ec2d93a5e0..61e9c78bd6d1 100644
--- a/Documentation/process/stable-kernel-rules.rst
+++ b/Documentation/process/stable-kernel-rules.rst
@@ -124,7 +124,7 @@ specified in the following format in the sign-off area:
124 124
125.. code-block:: none 125.. code-block:: none
126 126
127 Cc: <stable@vger.kernel.org> # 3.3.x- 127 Cc: <stable@vger.kernel.org> # 3.3.x
128 128
129The tag has the meaning of: 129The tag has the meaning of:
130 130
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index 3c248f772ae6..fd106899afd1 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -3377,6 +3377,69 @@ struct kvm_ppc_resize_hpt {
3377 __u32 pad; 3377 __u32 pad;
3378}; 3378};
3379 3379
33804.104 KVM_X86_GET_MCE_CAP_SUPPORTED
3381
3382Capability: KVM_CAP_MCE
3383Architectures: x86
3384Type: system ioctl
3385Parameters: u64 mce_cap (out)
3386Returns: 0 on success, -1 on error
3387
3388Returns supported MCE capabilities. The u64 mce_cap parameter
3389has the same format as the MSR_IA32_MCG_CAP register. Supported
3390capabilities will have the corresponding bits set.
3391
33924.105 KVM_X86_SETUP_MCE
3393
3394Capability: KVM_CAP_MCE
3395Architectures: x86
3396Type: vcpu ioctl
3397Parameters: u64 mcg_cap (in)
3398Returns: 0 on success,
3399 -EFAULT if u64 mcg_cap cannot be read,
3400 -EINVAL if the requested number of banks is invalid,
3401 -EINVAL if requested MCE capability is not supported.
3402
3403Initializes MCE support for use. The u64 mcg_cap parameter
3404has the same format as the MSR_IA32_MCG_CAP register and
3405specifies which capabilities should be enabled. The maximum
3406supported number of error-reporting banks can be retrieved when
3407checking for KVM_CAP_MCE. The supported capabilities can be
3408retrieved with KVM_X86_GET_MCE_CAP_SUPPORTED.
3409
34104.106 KVM_X86_SET_MCE
3411
3412Capability: KVM_CAP_MCE
3413Architectures: x86
3414Type: vcpu ioctl
3415Parameters: struct kvm_x86_mce (in)
3416Returns: 0 on success,
3417 -EFAULT if struct kvm_x86_mce cannot be read,
3418 -EINVAL if the bank number is invalid,
3419 -EINVAL if VAL bit is not set in status field.
3420
3421Inject a machine check error (MCE) into the guest. The input
3422parameter is:
3423
3424struct kvm_x86_mce {
3425 __u64 status;
3426 __u64 addr;
3427 __u64 misc;
3428 __u64 mcg_status;
3429 __u8 bank;
3430 __u8 pad1[7];
3431 __u64 pad2[3];
3432};
3433
3434If the MCE being reported is an uncorrected error, KVM will
3435inject it as an MCE exception into the guest. If the guest
3436MCG_STATUS register reports that an MCE is in progress, KVM
3437causes a KVM_EXIT_SHUTDOWN vmexit.
3438
3439Otherwise, if the MCE is a corrected error, KVM will just
3440store it in the corresponding bank (provided this bank is
3441not holding a previously reported uncorrected error).
3442
33805. The kvm_run structure 34435. The kvm_run structure
3381------------------------ 3444------------------------
3382 3445
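A hedged userspace sketch of the flow across these ioctls (not part of the
patch; kvm_fd is an open /dev/kvm descriptor, vcpu_fd a vcpu descriptor,
and error handling is minimal):

	#include <linux/kvm.h>
	#include <sys/ioctl.h>

	static int enable_supported_mce_caps(int kvm_fd, int vcpu_fd)
	{
		__u64 mce_cap;

		/* system ioctl: MSR_IA32_MCG_CAP-format capability mask */
		if (ioctl(kvm_fd, KVM_X86_GET_MCE_CAP_SUPPORTED, &mce_cap) < 0)
			return -1;

		/* vcpu ioctl: enable what the host reports as supported */
		return ioctl(vcpu_fd, KVM_X86_SETUP_MCE, &mce_cap);
	}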
diff --git a/Documentation/virtual/kvm/devices/arm-vgic.txt b/Documentation/virtual/kvm/devices/arm-vgic.txt
index 76e61c883347..b2f60ca8b60c 100644
--- a/Documentation/virtual/kvm/devices/arm-vgic.txt
+++ b/Documentation/virtual/kvm/devices/arm-vgic.txt
@@ -83,6 +83,12 @@ Groups:
83 83
84 Bits for undefined preemption levels are RAZ/WI. 84 Bits for undefined preemption levels are RAZ/WI.
85 85
86 For historical reasons and to provide ABI compatibility with userspace, we
87 export the GICC_PMR register in the format of the GICH_VMCR.VMPriMask
88 field in the lower 5 bits of a word, meaning that userspace must always
89 use the lower 5 bits to communicate with the KVM device and must shift the
90 value left by 3 places to obtain the actual priority mask level.
91
86 Limitations: 92 Limitations:
87 - Priorities are not implemented, and registers are RAZ/WI 93 - Priorities are not implemented, and registers are RAZ/WI
88 - Currently only implemented for KVM_DEV_TYPE_ARM_VGIC_V2. 94 - Currently only implemented for KVM_DEV_TYPE_ARM_VGIC_V2.
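An illustrative pair of conversions for the shift described above (helper
names are hypothetical; assumes <stdint.h>):

	/* device attribute value (bits [4:0]) -> GICC_PMR priority mask */
	static inline uint32_t pmr_from_kvm(uint32_t v)
	{
		return (v & 0x1f) << 3;
	}

	/* GICC_PMR priority mask -> device attribute value */
	static inline uint32_t pmr_to_kvm(uint32_t pmr)
	{
		return (pmr >> 3) & 0x1f;
	}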
diff --git a/MAINTAINERS b/MAINTAINERS
index bac1a88cdd4a..66f5b55d205a 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -4117,14 +4117,13 @@ F: drivers/block/drbd/
4117F: lib/lru_cache.c 4117F: lib/lru_cache.c
4118F: Documentation/blockdev/drbd/ 4118F: Documentation/blockdev/drbd/
4119 4119
4120DRIVER CORE, KOBJECTS, DEBUGFS, KERNFS AND SYSFS 4120DRIVER CORE, KOBJECTS, DEBUGFS AND SYSFS
4121M: Greg Kroah-Hartman <gregkh@linuxfoundation.org> 4121M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
4122T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core.git 4122T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core.git
4123S: Supported 4123S: Supported
4124F: Documentation/kobject.txt 4124F: Documentation/kobject.txt
4125F: drivers/base/ 4125F: drivers/base/
4126F: fs/debugfs/ 4126F: fs/debugfs/
4127F: fs/kernfs/
4128F: fs/sysfs/ 4127F: fs/sysfs/
4129F: include/linux/debugfs.h 4128F: include/linux/debugfs.h
4130F: include/linux/kobj* 4129F: include/linux/kobj*
@@ -4248,6 +4247,7 @@ L: dri-devel@lists.freedesktop.org
4248S: Supported 4247S: Supported
4249F: drivers/gpu/drm/sun4i/ 4248F: drivers/gpu/drm/sun4i/
4250F: Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt 4249F: Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt
4250T: git git://git.kernel.org/pub/scm/linux/kernel/git/mripard/linux.git
4251 4251
4252DRM DRIVERS FOR AMLOGIC SOCS 4252DRM DRIVERS FOR AMLOGIC SOCS
4253M: Neil Armstrong <narmstrong@baylibre.com> 4253M: Neil Armstrong <narmstrong@baylibre.com>
@@ -4257,7 +4257,8 @@ W: http://linux-meson.com/
4257S: Supported 4257S: Supported
4258F: drivers/gpu/drm/meson/ 4258F: drivers/gpu/drm/meson/
4259F: Documentation/devicetree/bindings/display/amlogic,meson-vpu.txt 4259F: Documentation/devicetree/bindings/display/amlogic,meson-vpu.txt
4260T: git git://anongit.freedesktop.org/drm/drm-meson 4260F: Documentation/devicetree/bindings/display/amlogic,meson-dw-hdmi.txt
4261F: Documentation/gpu/meson.rst
4261T: git git://anongit.freedesktop.org/drm/drm-misc 4262T: git git://anongit.freedesktop.org/drm/drm-misc
4262 4263
4263DRM DRIVERS FOR EXYNOS 4264DRM DRIVERS FOR EXYNOS
@@ -4417,7 +4418,7 @@ DRM DRIVERS FOR STI
4417M: Benjamin Gaignard <benjamin.gaignard@linaro.org> 4418M: Benjamin Gaignard <benjamin.gaignard@linaro.org>
4418M: Vincent Abriou <vincent.abriou@st.com> 4419M: Vincent Abriou <vincent.abriou@st.com>
4419L: dri-devel@lists.freedesktop.org 4420L: dri-devel@lists.freedesktop.org
4420T: git http://git.linaro.org/people/benjamin.gaignard/kernel.git 4421T: git git://anongit.freedesktop.org/drm/drm-misc
4421S: Maintained 4422S: Maintained
4422F: drivers/gpu/drm/sti 4423F: drivers/gpu/drm/sti
4423F: Documentation/devicetree/bindings/display/st,stih4xx.txt 4424F: Documentation/devicetree/bindings/display/st,stih4xx.txt
@@ -4784,6 +4785,12 @@ L: linux-edac@vger.kernel.org
4784S: Maintained 4785S: Maintained
4785F: drivers/edac/mpc85xx_edac.[ch] 4786F: drivers/edac/mpc85xx_edac.[ch]
4786 4787
4788EDAC-PND2
4789M: Tony Luck <tony.luck@intel.com>
4790L: linux-edac@vger.kernel.org
4791S: Maintained
4792F: drivers/edac/pnd2_edac.[ch]
4793
4787EDAC-PASEMI 4794EDAC-PASEMI
4788M: Egor Martovetsky <egor@pasemi.com> 4795M: Egor Martovetsky <egor@pasemi.com>
4789L: linux-edac@vger.kernel.org 4796L: linux-edac@vger.kernel.org
@@ -4931,6 +4938,7 @@ F: include/linux/netfilter_bridge/
4931F: net/bridge/ 4938F: net/bridge/
4932 4939
4933ETHERNET PHY LIBRARY 4940ETHERNET PHY LIBRARY
4941M: Andrew Lunn <andrew@lunn.ch>
4934M: Florian Fainelli <f.fainelli@gmail.com> 4942M: Florian Fainelli <f.fainelli@gmail.com>
4935L: netdev@vger.kernel.org 4943L: netdev@vger.kernel.org
4936S: Maintained 4944S: Maintained
@@ -7092,9 +7100,9 @@ S: Maintained
7092F: fs/autofs4/ 7100F: fs/autofs4/
7093 7101
7094KERNEL BUILD + files below scripts/ (unless maintained elsewhere) 7102KERNEL BUILD + files below scripts/ (unless maintained elsewhere)
7103M: Masahiro Yamada <yamada.masahiro@socionext.com>
7095M: Michal Marek <mmarek@suse.com> 7104M: Michal Marek <mmarek@suse.com>
7096T: git git://git.kernel.org/pub/scm/linux/kernel/git/mmarek/kbuild.git for-next 7105T: git git://git.kernel.org/pub/scm/linux/kernel/git/masahiroy/linux-kbuild.git
7097T: git git://git.kernel.org/pub/scm/linux/kernel/git/mmarek/kbuild.git rc-fixes
7098L: linux-kbuild@vger.kernel.org 7106L: linux-kbuild@vger.kernel.org
7099S: Maintained 7107S: Maintained
7100F: Documentation/kbuild/ 7108F: Documentation/kbuild/
@@ -7211,6 +7219,14 @@ F: arch/mips/include/uapi/asm/kvm*
7211F: arch/mips/include/asm/kvm* 7219F: arch/mips/include/asm/kvm*
7212F: arch/mips/kvm/ 7220F: arch/mips/kvm/
7213 7221
7222KERNFS
7223M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
7224M: Tejun Heo <tj@kernel.org>
7225T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core.git
7226S: Supported
7227F: include/linux/kernfs.h
7228F: fs/kernfs/
7229
7214KEXEC 7230KEXEC
7215M: Eric Biederman <ebiederm@xmission.com> 7231M: Eric Biederman <ebiederm@xmission.com>
7216W: http://kernel.org/pub/linux/utils/kernel/kexec/ 7232W: http://kernel.org/pub/linux/utils/kernel/kexec/
@@ -10825,6 +10841,7 @@ F: drivers/s390/block/dasd*
10825F: block/partitions/ibm.c 10841F: block/partitions/ibm.c
10826 10842
10827S390 NETWORK DRIVERS 10843S390 NETWORK DRIVERS
10844M: Julian Wiedmann <jwi@linux.vnet.ibm.com>
10828M: Ursula Braun <ubraun@linux.vnet.ibm.com> 10845M: Ursula Braun <ubraun@linux.vnet.ibm.com>
10829L: linux-s390@vger.kernel.org 10846L: linux-s390@vger.kernel.org
10830W: http://www.ibm.com/developerworks/linux/linux390/ 10847W: http://www.ibm.com/developerworks/linux/linux390/
@@ -10855,6 +10872,7 @@ S: Supported
10855F: drivers/s390/scsi/zfcp_* 10872F: drivers/s390/scsi/zfcp_*
10856 10873
10857S390 IUCV NETWORK LAYER 10874S390 IUCV NETWORK LAYER
10875M: Julian Wiedmann <jwi@linux.vnet.ibm.com>
10858M: Ursula Braun <ubraun@linux.vnet.ibm.com> 10876M: Ursula Braun <ubraun@linux.vnet.ibm.com>
10859L: linux-s390@vger.kernel.org 10877L: linux-s390@vger.kernel.org
10860W: http://www.ibm.com/developerworks/linux/linux390/ 10878W: http://www.ibm.com/developerworks/linux/linux390/
@@ -13314,7 +13332,7 @@ F: drivers/virtio/
13314F: tools/virtio/ 13332F: tools/virtio/
13315F: drivers/net/virtio_net.c 13333F: drivers/net/virtio_net.c
13316F: drivers/block/virtio_blk.c 13334F: drivers/block/virtio_blk.c
13317F: include/linux/virtio_*.h 13335F: include/linux/virtio*.h
13318F: include/uapi/linux/virtio_*.h 13336F: include/uapi/linux/virtio_*.h
13319F: drivers/crypto/virtio/ 13337F: drivers/crypto/virtio/
13320 13338
diff --git a/Makefile b/Makefile
index 231e6a7749bd..5039b9148d15 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
1VERSION = 4 1VERSION = 4
2PATCHLEVEL = 11 2PATCHLEVEL = 11
3SUBLEVEL = 0 3SUBLEVEL = 0
4EXTRAVERSION = -rc4 4EXTRAVERSION = -rc7
5NAME = Fearless Coyote 5NAME = Fearless Coyote
6 6
7# *DOCUMENTATION* 7# *DOCUMENTATION*
@@ -372,7 +372,7 @@ LDFLAGS_MODULE =
372CFLAGS_KERNEL = 372CFLAGS_KERNEL =
373AFLAGS_KERNEL = 373AFLAGS_KERNEL =
374LDFLAGS_vmlinux = 374LDFLAGS_vmlinux =
375CFLAGS_GCOV = -fprofile-arcs -ftest-coverage -fno-tree-loop-im -Wno-maybe-uninitialized 375CFLAGS_GCOV := -fprofile-arcs -ftest-coverage -fno-tree-loop-im $(call cc-disable-warning,maybe-uninitialized,)
376CFLAGS_KCOV := $(call cc-option,-fsanitize-coverage=trace-pc,) 376CFLAGS_KCOV := $(call cc-option,-fsanitize-coverage=trace-pc,)
377 377
378 378
@@ -653,6 +653,12 @@ KBUILD_CFLAGS += $(call cc-ifversion, -lt, 0409, \
653# Tell gcc to never replace conditional load with a non-conditional one 653# Tell gcc to never replace conditional load with a non-conditional one
654KBUILD_CFLAGS += $(call cc-option,--param=allow-store-data-races=0) 654KBUILD_CFLAGS += $(call cc-option,--param=allow-store-data-races=0)
655 655
656# check for 'asm goto'
657ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC) $(KBUILD_CFLAGS)), y)
658 KBUILD_CFLAGS += -DCC_HAVE_ASM_GOTO
659 KBUILD_AFLAGS += -DCC_HAVE_ASM_GOTO
660endif
661
656include scripts/Makefile.gcc-plugins 662include scripts/Makefile.gcc-plugins
657 663
658ifdef CONFIG_READABLE_ASM 664ifdef CONFIG_READABLE_ASM
@@ -798,12 +804,6 @@ KBUILD_CFLAGS += $(call cc-option,-Werror=incompatible-pointer-types)
798# use the deterministic mode of AR if available 804# use the deterministic mode of AR if available
799KBUILD_ARFLAGS := $(call ar-option,D) 805KBUILD_ARFLAGS := $(call ar-option,D)
800 806
801# check for 'asm goto'
802ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC) $(KBUILD_CFLAGS)), y)
803 KBUILD_CFLAGS += -DCC_HAVE_ASM_GOTO
804 KBUILD_AFLAGS += -DCC_HAVE_ASM_GOTO
805endif
806
807include scripts/Makefile.kasan 807include scripts/Makefile.kasan
808include scripts/Makefile.extrawarn 808include scripts/Makefile.extrawarn
809include scripts/Makefile.ubsan 809include scripts/Makefile.ubsan
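The check now runs before scripts/Makefile.gcc-plugins is included, so
CC_HAVE_ASM_GOTO is already set when the plugin makefiles are evaluated.
A sketch of the construct scripts/gcc-goto.sh probes for (illustrative
only, not from the patch):

	/* compiles only when the compiler supports 'asm goto' */
	static inline int asm_goto_supported(void)
	{
	#ifdef CC_HAVE_ASM_GOTO
		asm goto("" : : : : yes);	/* may branch to a C label */
		return 1;
	yes:
		return 1;
	#else
		return 0;
	#endif
	}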
diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
index 0b961093ca5c..6d76e528ab8f 100644
--- a/arch/alpha/kernel/osf_sys.c
+++ b/arch/alpha/kernel/osf_sys.c
@@ -1290,7 +1290,7 @@ SYSCALL_DEFINE1(old_adjtimex, struct timex32 __user *, txc_p)
1290 /* copy relevant bits of struct timex. */ 1290 /* copy relevant bits of struct timex. */
1291 if (copy_from_user(&txc, txc_p, offsetof(struct timex32, time)) || 1291 if (copy_from_user(&txc, txc_p, offsetof(struct timex32, time)) ||
1292 copy_from_user(&txc.tick, &txc_p->tick, sizeof(struct timex32) - 1292 copy_from_user(&txc.tick, &txc_p->tick, sizeof(struct timex32) -
1293 offsetof(struct timex32, time))) 1293 offsetof(struct timex32, tick)))
1294 return -EFAULT; 1294 return -EFAULT;
1295 1295
1296 ret = do_adjtimex(&txc); 1296 ret = do_adjtimex(&txc);
diff --git a/arch/arc/boot/dts/skeleton.dtsi b/arch/arc/boot/dts/skeleton.dtsi
index 65808fe0a290..2891cb266cf0 100644
--- a/arch/arc/boot/dts/skeleton.dtsi
+++ b/arch/arc/boot/dts/skeleton.dtsi
@@ -26,6 +26,7 @@
26 device_type = "cpu"; 26 device_type = "cpu";
27 compatible = "snps,arc770d"; 27 compatible = "snps,arc770d";
28 reg = <0>; 28 reg = <0>;
29 clocks = <&core_clk>;
29 }; 30 };
30 }; 31 };
31 32
diff --git a/arch/arc/boot/dts/skeleton_hs.dtsi b/arch/arc/boot/dts/skeleton_hs.dtsi
index 2dfe8037dfbb..5e944d3e5b74 100644
--- a/arch/arc/boot/dts/skeleton_hs.dtsi
+++ b/arch/arc/boot/dts/skeleton_hs.dtsi
@@ -21,6 +21,7 @@
21 device_type = "cpu"; 21 device_type = "cpu";
22 compatible = "snps,archs38"; 22 compatible = "snps,archs38";
23 reg = <0>; 23 reg = <0>;
24 clocks = <&core_clk>;
24 }; 25 };
25 }; 26 };
26 27
diff --git a/arch/arc/boot/dts/skeleton_hs_idu.dtsi b/arch/arc/boot/dts/skeleton_hs_idu.dtsi
index 4c11079f3565..54b277d7dea0 100644
--- a/arch/arc/boot/dts/skeleton_hs_idu.dtsi
+++ b/arch/arc/boot/dts/skeleton_hs_idu.dtsi
@@ -19,8 +19,27 @@
19 19
20 cpu@0 { 20 cpu@0 {
21 device_type = "cpu"; 21 device_type = "cpu";
22 compatible = "snps,archs38xN"; 22 compatible = "snps,archs38";
23 reg = <0>; 23 reg = <0>;
24 clocks = <&core_clk>;
25 };
26 cpu@1 {
27 device_type = "cpu";
28 compatible = "snps,archs38";
29 reg = <1>;
30 clocks = <&core_clk>;
31 };
32 cpu@2 {
33 device_type = "cpu";
34 compatible = "snps,archs38";
35 reg = <2>;
36 clocks = <&core_clk>;
37 };
38 cpu@3 {
39 device_type = "cpu";
40 compatible = "snps,archs38";
41 reg = <3>;
42 clocks = <&core_clk>;
24 }; 43 };
25 }; 44 };
26 45
diff --git a/arch/arc/boot/dts/vdk_axs10x_mb.dtsi b/arch/arc/boot/dts/vdk_axs10x_mb.dtsi
index f0df59b23e21..459fc656b759 100644
--- a/arch/arc/boot/dts/vdk_axs10x_mb.dtsi
+++ b/arch/arc/boot/dts/vdk_axs10x_mb.dtsi
@@ -112,13 +112,19 @@
112 interrupts = <7>; 112 interrupts = <7>;
113 bus-width = <4>; 113 bus-width = <4>;
114 }; 114 };
115 };
115 116
116 /* Embedded Vision subsystem UIO mappings; only relevant for EV VDK */ 117 /*
117 uio_ev: uio@0xD0000000 { 118 * Embedded Vision subsystem UIO mappings; only relevant for EV VDK
118 compatible = "generic-uio"; 119 *
119 reg = <0xD0000000 0x2000 0xD1000000 0x2000 0x90000000 0x10000000 0xC0000000 0x10000000>; 120 * This node is intentionally put outside of MB above because
120 reg-names = "ev_gsa", "ev_ctrl", "ev_shared_mem", "ev_code_mem"; 121 * it maps areas outside of MB's 0xEz-0xFz.
121 interrupts = <23>; 122 */
122 }; 123 uio_ev: uio@0xD0000000 {
124 compatible = "generic-uio";
125 reg = <0xD0000000 0x2000 0xD1000000 0x2000 0x90000000 0x10000000 0xC0000000 0x10000000>;
126 reg-names = "ev_gsa", "ev_ctrl", "ev_shared_mem", "ev_code_mem";
127 interrupt-parent = <&mb_intc>;
128 interrupts = <23>;
123 }; 129 };
124}; 130};
diff --git a/arch/arc/include/asm/kprobes.h b/arch/arc/include/asm/kprobes.h
index 00bdbe167615..2e52d18e6bc7 100644
--- a/arch/arc/include/asm/kprobes.h
+++ b/arch/arc/include/asm/kprobes.h
@@ -54,9 +54,7 @@ int kprobe_fault_handler(struct pt_regs *regs, unsigned long cause);
54void kretprobe_trampoline(void); 54void kretprobe_trampoline(void);
55void trap_is_kprobe(unsigned long address, struct pt_regs *regs); 55void trap_is_kprobe(unsigned long address, struct pt_regs *regs);
56#else 56#else
57static void trap_is_kprobe(unsigned long address, struct pt_regs *regs) 57#define trap_is_kprobe(address, regs)
58{
59}
60#endif /* CONFIG_KPROBES */ 58#endif /* CONFIG_KPROBES */
61 59
62#endif /* _ARC_KPROBES_H */ 60#endif /* _ARC_KPROBES_H */
diff --git a/arch/arc/kernel/entry-arcv2.S b/arch/arc/kernel/entry-arcv2.S
index 2585632eaa68..cc558a25b8fa 100644
--- a/arch/arc/kernel/entry-arcv2.S
+++ b/arch/arc/kernel/entry-arcv2.S
@@ -100,15 +100,21 @@ END(handle_interrupt)
100;################### Non TLB Exception Handling ############################# 100;################### Non TLB Exception Handling #############################
101 101
102ENTRY(EV_SWI) 102ENTRY(EV_SWI)
103 flag 1 103 ; TODO: implement this
104 EXCEPTION_PROLOGUE
105 b ret_from_exception
104END(EV_SWI) 106END(EV_SWI)
105 107
106ENTRY(EV_DivZero) 108ENTRY(EV_DivZero)
107 flag 1 109 ; TODO: implement this
110 EXCEPTION_PROLOGUE
111 b ret_from_exception
108END(EV_DivZero) 112END(EV_DivZero)
109 113
110ENTRY(EV_DCError) 114ENTRY(EV_DCError)
111 flag 1 115 ; TODO: implement this
116 EXCEPTION_PROLOGUE
117 b ret_from_exception
112END(EV_DCError) 118END(EV_DCError)
113 119
114; --------------------------------------------- 120; ---------------------------------------------
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
index 3093fa898a23..fa62404ba58f 100644
--- a/arch/arc/kernel/setup.c
+++ b/arch/arc/kernel/setup.c
@@ -10,6 +10,7 @@
10#include <linux/fs.h> 10#include <linux/fs.h>
11#include <linux/delay.h> 11#include <linux/delay.h>
12#include <linux/root_dev.h> 12#include <linux/root_dev.h>
13#include <linux/clk.h>
13#include <linux/clk-provider.h> 14#include <linux/clk-provider.h>
14#include <linux/clocksource.h> 15#include <linux/clocksource.h>
15#include <linux/console.h> 16#include <linux/console.h>
@@ -488,8 +489,9 @@ static int show_cpuinfo(struct seq_file *m, void *v)
488{ 489{
489 char *str; 490 char *str;
490 int cpu_id = ptr_to_cpu(v); 491 int cpu_id = ptr_to_cpu(v);
491 struct device_node *core_clk = of_find_node_by_name(NULL, "core_clk"); 492 struct device *cpu_dev = get_cpu_device(cpu_id);
492 u32 freq = 0; 493 struct clk *cpu_clk;
494 unsigned long freq = 0;
493 495
494 if (!cpu_online(cpu_id)) { 496 if (!cpu_online(cpu_id)) {
495 seq_printf(m, "processor [%d]\t: Offline\n", cpu_id); 497 seq_printf(m, "processor [%d]\t: Offline\n", cpu_id);
@@ -502,9 +504,15 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 
     seq_printf(m, arc_cpu_mumbojumbo(cpu_id, str, PAGE_SIZE));
 
-    of_property_read_u32(core_clk, "clock-frequency", &freq);
+    cpu_clk = clk_get(cpu_dev, NULL);
+    if (IS_ERR(cpu_clk)) {
+        seq_printf(m, "CPU speed \t: Cannot get clock for processor [%d]\n",
+               cpu_id);
+    } else {
+        freq = clk_get_rate(cpu_clk);
+    }
     if (freq)
-        seq_printf(m, "CPU speed\t: %u.%02u Mhz\n",
+        seq_printf(m, "CPU speed\t: %lu.%02lu Mhz\n",
                freq / 1000000, (freq / 10000) % 100);
 
     seq_printf(m, "Bogo MIPS\t: %lu.%02lu\n",
diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c
index d408fa21a07c..928562967f3c 100644
--- a/arch/arc/mm/cache.c
+++ b/arch/arc/mm/cache.c
@@ -633,6 +633,9 @@ noinline static void slc_entire_op(const int op)
 
     write_aux_reg(ARC_REG_SLC_INVALIDATE, 1);
 
+    /* Make sure "busy" bit reports correct status, see STAR 9001165532 */
+    read_aux_reg(r);
+
     /* Important to wait for flush to complete */
     while (read_aux_reg(r) & SLC_CTRL_BUSY);
 }
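
The extra read_aux_reg() above works around a hardware issue (STAR 9001165532) where the busy flag may not read back correctly immediately after the triggering write; a dummy read of the control register before the polling loop makes the status reliable. A generic sketch of the write/read-back/poll pattern, with hypothetical MMIO registers:

    #include <stdint.h>

    #define BUSY 0x100  /* illustrative busy bit */

    /* Sketch: kick off an operation, then poll its busy bit safely. */
    static void start_and_wait(volatile uint32_t *ctrl, volatile uint32_t *kick)
    {
        *kick = 1;          /* trigger the operation */
        (void)*ctrl;        /* dummy read-back so BUSY reports correctly */
        while (*ctrl & BUSY)
            ;               /* now the poll loop observes valid status */
    }
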
diff --git a/arch/arm/boot/dts/am335x-baltos.dtsi b/arch/arm/boot/dts/am335x-baltos.dtsi
index efb5eae290a8..d42b98f15e8b 100644
--- a/arch/arm/boot/dts/am335x-baltos.dtsi
+++ b/arch/arm/boot/dts/am335x-baltos.dtsi
@@ -371,6 +371,8 @@
 
     phy1: ethernet-phy@1 {
         reg = <7>;
+        eee-broken-100tx;
+        eee-broken-1000t;
     };
 };
 
diff --git a/arch/arm/boot/dts/am335x-evmsk.dts b/arch/arm/boot/dts/am335x-evmsk.dts
index 9e43c443738a..9ba4b18c0cb2 100644
--- a/arch/arm/boot/dts/am335x-evmsk.dts
+++ b/arch/arm/boot/dts/am335x-evmsk.dts
@@ -672,6 +672,7 @@
     ti,non-removable;
     bus-width = <4>;
     cap-power-off-card;
+    keep-power-in-suspend;
     pinctrl-names = "default";
     pinctrl-0 = <&mmc2_pins>;
 
diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi
index 2c9e56f4aac5..bbfb9d5a70a9 100644
--- a/arch/arm/boot/dts/dra7.dtsi
+++ b/arch/arm/boot/dts/dra7.dtsi
@@ -283,6 +283,7 @@
     device_type = "pci";
     ranges = <0x81000000 0 0          0x03000 0 0x00010000
           0x82000000 0 0x20013000 0x13000 0 0xffed000>;
+    bus-range = <0x00 0xff>;
     #interrupt-cells = <1>;
     num-lanes = <1>;
     linux,pci-domain = <0>;
@@ -319,6 +320,7 @@
     device_type = "pci";
     ranges = <0x81000000 0 0          0x03000 0 0x00010000
           0x82000000 0 0x30013000 0x13000 0 0xffed000>;
+    bus-range = <0x00 0xff>;
     #interrupt-cells = <1>;
     num-lanes = <1>;
     linux,pci-domain = <1>;
diff --git a/arch/arm/boot/dts/logicpd-torpedo-som.dtsi b/arch/arm/boot/dts/logicpd-torpedo-som.dtsi
index 8f9a69ca818c..efe53998c961 100644
--- a/arch/arm/boot/dts/logicpd-torpedo-som.dtsi
+++ b/arch/arm/boot/dts/logicpd-torpedo-som.dtsi
@@ -121,7 +121,7 @@
 &i2c3 {
     clock-frequency = <400000>;
     at24@50 {
-        compatible = "at24,24c02";
+        compatible = "atmel,24c64";
         readonly;
         reg = <0x50>;
     };
diff --git a/arch/arm/boot/dts/sun8i-a33.dtsi b/arch/arm/boot/dts/sun8i-a33.dtsi
index 18c174fef84f..306af6cadf26 100644
--- a/arch/arm/boot/dts/sun8i-a33.dtsi
+++ b/arch/arm/boot/dts/sun8i-a33.dtsi
@@ -66,12 +66,6 @@
         opp-microvolt = <1200000>;
         clock-latency-ns = <244144>; /* 8 32k periods */
     };
-
-    opp@1200000000 {
-        opp-hz = /bits/ 64 <1200000000>;
-        opp-microvolt = <1320000>;
-        clock-latency-ns = <244144>; /* 8 32k periods */
-    };
 };
 
 cpus {
@@ -81,16 +75,22 @@
         operating-points-v2 = <&cpu0_opp_table>;
     };
 
+    cpu@1 {
+        operating-points-v2 = <&cpu0_opp_table>;
+    };
+
     cpu@2 {
         compatible = "arm,cortex-a7";
         device_type = "cpu";
         reg = <2>;
+        operating-points-v2 = <&cpu0_opp_table>;
     };
 
     cpu@3 {
         compatible = "arm,cortex-a7";
         device_type = "cpu";
         reg = <3>;
+        operating-points-v2 = <&cpu0_opp_table>;
     };
 };
 
@@ -113,8 +113,8 @@
     simple-audio-card,mclk-fs = <512>;
     simple-audio-card,aux-devs = <&codec_analog>;
     simple-audio-card,routing =
-            "Left DAC", "Digital Left DAC",
-            "Right DAC", "Digital Right DAC";
+            "Left DAC", "AIF1 Slot 0 Left",
+            "Right DAC", "AIF1 Slot 0 Right";
     status = "disabled";
 
     simple-audio-card,cpu {
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 96dba7cd8be7..314eb6abe1ff 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -1124,6 +1124,9 @@ static void cpu_hyp_reinit(void)
         if (__hyp_get_vectors() == hyp_default_vectors)
             cpu_init_hyp_mode(NULL);
     }
+
+    if (vgic_present)
+        kvm_vgic_init_cpu_hardware();
 }
 
 static void cpu_hyp_reset(void)
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 962616fd4ddd..582a972371cf 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -292,11 +292,18 @@ static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
     phys_addr_t addr = start, end = start + size;
     phys_addr_t next;
 
+    assert_spin_locked(&kvm->mmu_lock);
     pgd = kvm->arch.pgd + stage2_pgd_index(addr);
     do {
         next = stage2_pgd_addr_end(addr, end);
         if (!stage2_pgd_none(*pgd))
             unmap_stage2_puds(kvm, pgd, addr, next);
+        /*
+         * If the range is too large, release the kvm->mmu_lock
+         * to prevent starvation and lockup detector warnings.
+         */
+        if (next != end)
+            cond_resched_lock(&kvm->mmu_lock);
     } while (pgd++, addr = next, addr != end);
 }
 
@@ -803,6 +810,7 @@ void stage2_unmap_vm(struct kvm *kvm)
     int idx;
 
     idx = srcu_read_lock(&kvm->srcu);
+    down_read(&current->mm->mmap_sem);
     spin_lock(&kvm->mmu_lock);
 
     slots = kvm_memslots(kvm);
@@ -810,6 +818,7 @@ void stage2_unmap_vm(struct kvm *kvm)
         stage2_unmap_memslot(kvm, memslot);
 
     spin_unlock(&kvm->mmu_lock);
+    up_read(&current->mm->mmap_sem);
     srcu_read_unlock(&kvm->srcu, idx);
 }
 
@@ -829,7 +838,10 @@ void kvm_free_stage2_pgd(struct kvm *kvm)
     if (kvm->arch.pgd == NULL)
         return;
 
+    spin_lock(&kvm->mmu_lock);
     unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
+    spin_unlock(&kvm->mmu_lock);
+
     /* Free the HW pgd, one page at a time */
     free_pages_exact(kvm->arch.pgd, S2_PGD_SIZE);
     kvm->arch.pgd = NULL;
@@ -1801,6 +1813,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
         (KVM_PHYS_SIZE >> PAGE_SHIFT))
         return -EFAULT;
 
+    down_read(&current->mm->mmap_sem);
     /*
      * A memory region could potentially cover multiple VMAs, and any holes
      * between them, so iterate over all of them to find out if we can map
@@ -1844,8 +1857,10 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
             pa += vm_start - vma->vm_start;
 
             /* IO region dirty page logging not allowed */
-            if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES)
-                return -EINVAL;
+            if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES) {
+                ret = -EINVAL;
+                goto out;
+            }
 
             ret = kvm_phys_addr_ioremap(kvm, gpa, pa,
                             vm_end - vm_start,
@@ -1857,7 +1872,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
     } while (hva < reg_end);
 
     if (change == KVM_MR_FLAGS_ONLY)
-        return ret;
+        goto out;
 
     spin_lock(&kvm->mmu_lock);
     if (ret)
@@ -1865,6 +1880,8 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
     else
         stage2_flush_memslot(kvm, memslot);
     spin_unlock(&kvm->mmu_lock);
+out:
+    up_read(&current->mm->mmap_sem);
     return ret;
 }
 
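
The unmap_stage2_range() hunk above is an instance of a common kernel pattern: a long walk under a spinlock calls cond_resched_lock() between chunks so contending CPUs and the scheduler make progress, avoiding soft-lockup warnings. A kernel-style sketch of the pattern; CHUNK and process_chunk() are hypothetical:

    #include <linux/kernel.h>
    #include <linux/sched.h>
    #include <linux/spinlock.h>

    #define CHUNK (2UL << 20)  /* illustrative step size */

    static void process_chunk(unsigned long start, unsigned long end);

    static void walk_range(spinlock_t *lock, unsigned long start,
                           unsigned long end)
    {
        unsigned long addr = start, next;

        spin_lock(lock);
        do {
            next = min(addr + CHUNK, end);
            process_chunk(addr, next);
            /* Not done yet: drop and re-take the lock to let waiters in. */
            if (next != end)
                cond_resched_lock(lock);
        } while (addr = next, addr != end);
        spin_unlock(lock);
    }
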
diff --git a/arch/arm/mach-omap2/common.h b/arch/arm/mach-omap2/common.h
index c4f2ace91ea2..3089d3bfa19b 100644
--- a/arch/arm/mach-omap2/common.h
+++ b/arch/arm/mach-omap2/common.h
@@ -270,6 +270,7 @@ extern const struct smp_operations omap4_smp_ops;
 extern int omap4_mpuss_init(void);
 extern int omap4_enter_lowpower(unsigned int cpu, unsigned int power_state);
 extern int omap4_hotplug_cpu(unsigned int cpu, unsigned int power_state);
+extern u32 omap4_get_cpu1_ns_pa_addr(void);
 #else
 static inline int omap4_enter_lowpower(unsigned int cpu,
                        unsigned int power_state)
diff --git a/arch/arm/mach-omap2/omap-hotplug.c b/arch/arm/mach-omap2/omap-hotplug.c
index d3fb5661bb5d..433db6d0b073 100644
--- a/arch/arm/mach-omap2/omap-hotplug.c
+++ b/arch/arm/mach-omap2/omap-hotplug.c
@@ -50,7 +50,7 @@ void omap4_cpu_die(unsigned int cpu)
     omap4_hotplug_cpu(cpu, PWRDM_POWER_OFF);
 
     if (omap_secure_apis_support())
-        boot_cpu = omap_read_auxcoreboot0();
+        boot_cpu = omap_read_auxcoreboot0() >> 9;
     else
         boot_cpu =
             readl_relaxed(base + OMAP_AUX_CORE_BOOT_0) >> 5;
diff --git a/arch/arm/mach-omap2/omap-mpuss-lowpower.c b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
index 113ab2dd2ee9..03ec6d307c82 100644
--- a/arch/arm/mach-omap2/omap-mpuss-lowpower.c
+++ b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
@@ -64,6 +64,7 @@
 #include "prm-regbits-44xx.h"
 
 static void __iomem *sar_base;
+static u32 old_cpu1_ns_pa_addr;
 
 #if defined(CONFIG_PM) && defined(CONFIG_SMP)
 
@@ -212,6 +213,11 @@ static void __init save_l2x0_context(void)
 {}
 #endif
 
+u32 omap4_get_cpu1_ns_pa_addr(void)
+{
+    return old_cpu1_ns_pa_addr;
+}
+
 /**
  * omap4_enter_lowpower: OMAP4 MPUSS Low Power Entry Function
  * The purpose of this function is to manage low power programming
@@ -460,22 +466,30 @@ int __init omap4_mpuss_init(void)
 void __init omap4_mpuss_early_init(void)
 {
     unsigned long startup_pa;
+    void __iomem *ns_pa_addr;
 
-    if (!(cpu_is_omap44xx() || soc_is_omap54xx()))
+    if (!(soc_is_omap44xx() || soc_is_omap54xx()))
         return;
 
     sar_base = omap4_get_sar_ram_base();
 
-    if (cpu_is_omap443x())
+    /* Save old NS_PA_ADDR for validity checks later on */
+    if (soc_is_omap44xx())
+        ns_pa_addr = sar_base + CPU1_WAKEUP_NS_PA_ADDR_OFFSET;
+    else
+        ns_pa_addr = sar_base + OMAP5_CPU1_WAKEUP_NS_PA_ADDR_OFFSET;
+    old_cpu1_ns_pa_addr = readl_relaxed(ns_pa_addr);
+
+    if (soc_is_omap443x())
         startup_pa = __pa_symbol(omap4_secondary_startup);
-    else if (cpu_is_omap446x())
+    else if (soc_is_omap446x())
         startup_pa = __pa_symbol(omap4460_secondary_startup);
     else if ((__boot_cpu_mode & MODE_MASK) == HYP_MODE)
         startup_pa = __pa_symbol(omap5_secondary_hyp_startup);
     else
         startup_pa = __pa_symbol(omap5_secondary_startup);
 
-    if (cpu_is_omap44xx())
+    if (soc_is_omap44xx())
         writel_relaxed(startup_pa, sar_base +
                    CPU1_WAKEUP_NS_PA_ADDR_OFFSET);
     else
diff --git a/arch/arm/mach-omap2/omap-smc.S b/arch/arm/mach-omap2/omap-smc.S
index fd90125bffc7..72506e6cf9e7 100644
--- a/arch/arm/mach-omap2/omap-smc.S
+++ b/arch/arm/mach-omap2/omap-smc.S
@@ -94,6 +94,5 @@ ENTRY(omap_read_auxcoreboot0)
     ldr r12, =0x103
     dsb
     smc #0
-    mov r0, r0, lsr #9
     ldmfd   sp!, {r2-r12, pc}
 ENDPROC(omap_read_auxcoreboot0)
diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c
index 003353b0b794..3faf454ba487 100644
--- a/arch/arm/mach-omap2/omap-smp.c
+++ b/arch/arm/mach-omap2/omap-smp.c
@@ -21,6 +21,7 @@
 #include <linux/io.h>
 #include <linux/irqchip/arm-gic.h>
 
+#include <asm/sections.h>
 #include <asm/smp_scu.h>
 #include <asm/virt.h>
 
@@ -40,10 +41,14 @@
 
 #define OMAP5_CORE_COUNT    0x2
 
+#define AUX_CORE_BOOT0_GP_RELEASE   0x020
+#define AUX_CORE_BOOT0_HS_RELEASE   0x200
+
 struct omap_smp_config {
     unsigned long cpu1_rstctrl_pa;
     void __iomem *cpu1_rstctrl_va;
     void __iomem *scu_base;
+    void __iomem *wakeupgen_base;
     void *startup_addr;
 };
 
@@ -140,7 +145,6 @@ static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle)
     static struct clockdomain *cpu1_clkdm;
     static bool booted;
     static struct powerdomain *cpu1_pwrdm;
-    void __iomem *base = omap_get_wakeupgen_base();
 
     /*
      * Set synchronisation state between this boot processor
@@ -155,9 +159,11 @@ static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle)
      * A barrier is added to ensure that write buffer is drained
      */
     if (omap_secure_apis_support())
-        omap_modify_auxcoreboot0(0x200, 0xfffffdff);
+        omap_modify_auxcoreboot0(AUX_CORE_BOOT0_HS_RELEASE,
+                     0xfffffdff);
     else
-        writel_relaxed(0x20, base + OMAP_AUX_CORE_BOOT_0);
+        writel_relaxed(AUX_CORE_BOOT0_GP_RELEASE,
+                   cfg.wakeupgen_base + OMAP_AUX_CORE_BOOT_0);
 
     if (!cpu1_clkdm && !cpu1_pwrdm) {
         cpu1_clkdm = clkdm_lookup("mpu1_clkdm");
@@ -261,9 +267,72 @@ static void __init omap4_smp_init_cpus(void)
         set_cpu_possible(i, true);
 }
 
+/*
+ * For now, just make sure the start-up address is not within the booting
+ * kernel space as that means we just overwrote whatever secondary_startup()
+ * code there was.
+ */
+static bool __init omap4_smp_cpu1_startup_valid(unsigned long addr)
+{
+    if ((addr >= __pa(PAGE_OFFSET)) && (addr <= __pa(__bss_start)))
+        return false;
+
+    return true;
+}
+
+/*
+ * We may need to reset CPU1 before configuring, otherwise kexec boot can end
+ * up trying to use old kernel startup address or suspend-resume will
+ * occasionally fail to bring up CPU1 on 4430 if CPU1 fails to enter deeper
+ * idle states.
+ */
+static void __init omap4_smp_maybe_reset_cpu1(struct omap_smp_config *c)
+{
+    unsigned long cpu1_startup_pa, cpu1_ns_pa_addr;
+    bool needs_reset = false;
+    u32 released;
+
+    if (omap_secure_apis_support())
+        released = omap_read_auxcoreboot0() & AUX_CORE_BOOT0_HS_RELEASE;
+    else
+        released = readl_relaxed(cfg.wakeupgen_base +
+                     OMAP_AUX_CORE_BOOT_0) &
+                     AUX_CORE_BOOT0_GP_RELEASE;
+    if (released) {
+        pr_warn("smp: CPU1 not parked?\n");
+
+        return;
+    }
+
+    cpu1_startup_pa = readl_relaxed(cfg.wakeupgen_base +
+                    OMAP_AUX_CORE_BOOT_1);
+    cpu1_ns_pa_addr = omap4_get_cpu1_ns_pa_addr();
+
+    /* Did the configured secondary_startup() get overwritten? */
+    if (!omap4_smp_cpu1_startup_valid(cpu1_startup_pa))
+        needs_reset = true;
+
+    /*
+     * If omap4 or 5 has NS_PA_ADDR configured, CPU1 may be in a
+     * deeper idle state in WFI and will wake to an invalid address.
+     */
+    if ((soc_is_omap44xx() || soc_is_omap54xx()) &&
+        !omap4_smp_cpu1_startup_valid(cpu1_ns_pa_addr))
+        needs_reset = true;
+
+    if (!needs_reset || !c->cpu1_rstctrl_va)
+        return;
+
+    pr_info("smp: CPU1 parked within kernel, needs reset (0x%lx 0x%lx)\n",
+        cpu1_startup_pa, cpu1_ns_pa_addr);
+
+    writel_relaxed(1, c->cpu1_rstctrl_va);
+    readl_relaxed(c->cpu1_rstctrl_va);
+    writel_relaxed(0, c->cpu1_rstctrl_va);
+}
+
 static void __init omap4_smp_prepare_cpus(unsigned int max_cpus)
 {
-    void __iomem *base = omap_get_wakeupgen_base();
     const struct omap_smp_config *c = NULL;
 
     if (soc_is_omap443x())
@@ -281,6 +350,7 @@ static void __init omap4_smp_prepare_cpus(unsigned int max_cpus)
     /* Must preserve cfg.scu_base set earlier */
     cfg.cpu1_rstctrl_pa = c->cpu1_rstctrl_pa;
     cfg.startup_addr = c->startup_addr;
+    cfg.wakeupgen_base = omap_get_wakeupgen_base();
 
     if (soc_is_dra74x() || soc_is_omap54xx()) {
         if ((__boot_cpu_mode & MODE_MASK) == HYP_MODE)
@@ -299,15 +369,7 @@ static void __init omap4_smp_prepare_cpus(unsigned int max_cpus)
     if (cfg.scu_base)
         scu_enable(cfg.scu_base);
 
-    /*
-     * Reset CPU1 before configuring, otherwise kexec will
-     * end up trying to use old kernel startup address.
-     */
-    if (cfg.cpu1_rstctrl_va) {
-        writel_relaxed(1, cfg.cpu1_rstctrl_va);
-        readl_relaxed(cfg.cpu1_rstctrl_va);
-        writel_relaxed(0, cfg.cpu1_rstctrl_va);
-    }
+    omap4_smp_maybe_reset_cpu1(&cfg);
 
     /*
      * Write the address of secondary startup routine into the
@@ -319,7 +381,7 @@ static void __init omap4_smp_prepare_cpus(unsigned int max_cpus)
         omap_auxcoreboot_addr(__pa_symbol(cfg.startup_addr));
     else
         writel_relaxed(__pa_symbol(cfg.startup_addr),
-                   base + OMAP_AUX_CORE_BOOT_1);
+                   cfg.wakeupgen_base + OMAP_AUX_CORE_BOOT_1);
 }
 
 const struct smp_operations omap4_smp_ops __initconst = {
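
omap4_smp_maybe_reset_cpu1() above keeps the original reset sequence, including the read-back between the two writes: writel_relaxed() gives no guarantee about when the store reaches the device, so reading the same register back ensures reset is really asserted before it is deasserted. A kernel-style sketch of the pulse:

    #include <linux/io.h>

    /* Sketch: pulse a one-bit reset control behind 'rstctrl'. */
    static void pulse_reset(void __iomem *rstctrl)
    {
        writel_relaxed(1, rstctrl);  /* assert reset */
        readl_relaxed(rstctrl);      /* read-back: the assert has landed */
        writel_relaxed(0, rstctrl);  /* deassert reset */
    }
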
diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c
index e920dd83e443..f989145480c8 100644
--- a/arch/arm/mach-omap2/omap_device.c
+++ b/arch/arm/mach-omap2/omap_device.c
@@ -222,6 +222,14 @@ static int _omap_device_notifier_call(struct notifier_block *nb,
             dev_err(dev, "failed to idle\n");
         }
         break;
+    case BUS_NOTIFY_BIND_DRIVER:
+        od = to_omap_device(pdev);
+        if (od && (od->_state == OMAP_DEVICE_STATE_ENABLED) &&
+            pm_runtime_status_suspended(dev)) {
+            od->_driver_status = BUS_NOTIFY_BIND_DRIVER;
+            pm_runtime_set_active(dev);
+        }
+        break;
     case BUS_NOTIFY_ADD_DEVICE:
         if (pdev->dev.of_node)
             omap_device_build_from_dt(pdev);
diff --git a/arch/arm/mach-orion5x/Kconfig b/arch/arm/mach-orion5x/Kconfig
index 633442ad4e4c..2a7bb6ccdcb7 100644
--- a/arch/arm/mach-orion5x/Kconfig
+++ b/arch/arm/mach-orion5x/Kconfig
@@ -6,6 +6,7 @@ menuconfig ARCH_ORION5X
     select GPIOLIB
     select MVEBU_MBUS
     select PCI
+    select PHYLIB if NETDEVICES
     select PLAT_ORION_LEGACY
     help
       Support for the following Marvell Orion 5x series SoCs:
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 63eabb06f9f1..475811f5383a 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -935,13 +935,31 @@ static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_add
     __arm_dma_free(dev, size, cpu_addr, handle, attrs, true);
 }
 
+/*
+ * The whole dma_get_sgtable() idea is fundamentally unsafe - it seems
+ * that the intention is to allow exporting memory allocated via the
+ * coherent DMA APIs through the dma_buf API, which only accepts a
+ * scattertable.  This presents a couple of problems:
+ * 1. Not all memory allocated via the coherent DMA APIs is backed by
+ *    a struct page
+ * 2. Passing coherent DMA memory into the streaming APIs is not allowed
+ *    as we will try to flush the memory through a different alias to that
+ *    actually being used (and the flushes are redundant.)
+ */
 int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
          void *cpu_addr, dma_addr_t handle, size_t size,
          unsigned long attrs)
 {
-    struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
+    unsigned long pfn = dma_to_pfn(dev, handle);
+    struct page *page;
     int ret;
 
+    /* If the PFN is not valid, we do not have a struct page */
+    if (!pfn_valid(pfn))
+        return -ENXIO;
+
+    page = pfn_to_page(pfn);
+
     ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
     if (unlikely(ret))
         return ret;
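
The arm_dma_get_sgtable() fix boils down to one guard: never call pfn_to_page() on a PFN that pfn_valid() rejects, because coherent DMA memory is not always backed by struct page. A kernel-style sketch of the guard, using the same conversion helpers as the hunk:

    /* Sketch: resolve a DMA handle to a page only when one exists. */
    static struct page *handle_to_page(struct device *dev, dma_addr_t handle)
    {
        unsigned long pfn = dma_to_pfn(dev, handle);  /* bus addr -> PFN */

        if (!pfn_valid(pfn))  /* no struct page behind this PFN */
            return NULL;      /* caller turns this into -ENXIO */

        return pfn_to_page(pfn);
    }
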
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
index 3b5c7aaf9c76..33a45bd96860 100644
--- a/arch/arm/mm/nommu.c
+++ b/arch/arm/mm/nommu.c
@@ -303,7 +303,10 @@ static inline void set_vbar(unsigned long val)
  */
 static inline bool security_extensions_enabled(void)
 {
-    return !!cpuid_feature_extract(CPUID_EXT_PFR1, 4);
+    /* Check CPUID Identification Scheme before ID_PFR1 read */
+    if ((read_cpuid_id() & 0x000f0000) == 0x000f0000)
+        return !!cpuid_feature_extract(CPUID_EXT_PFR1, 4);
+    return 0;
 }
 
 static unsigned long __init setup_vectors_base(void)
diff --git a/arch/arm/plat-orion/common.c b/arch/arm/plat-orion/common.c
index 9255b6d67ba5..aff6994950ba 100644
--- a/arch/arm/plat-orion/common.c
+++ b/arch/arm/plat-orion/common.c
@@ -468,6 +468,7 @@ void __init orion_ge11_init(struct mv643xx_eth_platform_data *eth_data,
468 eth_data, &orion_ge11); 468 eth_data, &orion_ge11);
469} 469}
470 470
471#ifdef CONFIG_ARCH_ORION5X
471/***************************************************************************** 472/*****************************************************************************
472 * Ethernet switch 473 * Ethernet switch
473 ****************************************************************************/ 474 ****************************************************************************/
@@ -480,6 +481,9 @@ void __init orion_ge00_switch_init(struct dsa_chip_data *d)
480 struct mdio_board_info *bd; 481 struct mdio_board_info *bd;
481 unsigned int i; 482 unsigned int i;
482 483
484 if (!IS_BUILTIN(CONFIG_PHYLIB))
485 return;
486
483 for (i = 0; i < ARRAY_SIZE(d->port_names); i++) 487 for (i = 0; i < ARRAY_SIZE(d->port_names); i++)
484 if (!strcmp(d->port_names[i], "cpu")) 488 if (!strcmp(d->port_names[i], "cpu"))
485 break; 489 break;
@@ -493,6 +497,7 @@ void __init orion_ge00_switch_init(struct dsa_chip_data *d)
493 497
494 mdiobus_register_board_info(&orion_ge00_switch_board_info, 1); 498 mdiobus_register_board_info(&orion_ge00_switch_board_info, 1);
495} 499}
500#endif
496 501
497/***************************************************************************** 502/*****************************************************************************
498 * I2C 503 * I2C
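
The IS_BUILTIN() test above is deliberate: unlike IS_ENABLED(), it is true only when the option is compiled into the kernel image, and a modular PHYLIB cannot serve this legacy board-file path. For a tristate CONFIG_FOO the kconfig helpers behave as follows:

    #include <linux/kconfig.h>
    #include <linux/types.h>

    /*
     * IS_BUILTIN(CONFIG_FOO) -> 1 only for FOO=y
     * IS_MODULE(CONFIG_FOO)  -> 1 only for FOO=m
     * IS_ENABLED(CONFIG_FOO) -> 1 for FOO=y or FOO=m
     */
    static bool phylib_is_builtin(void)
    {
        return IS_BUILTIN(CONFIG_PHYLIB);
    }
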
diff --git a/arch/arm/probes/kprobes/core.c b/arch/arm/probes/kprobes/core.c
index b6dc9d838a9a..ad1f4e6a9e33 100644
--- a/arch/arm/probes/kprobes/core.c
+++ b/arch/arm/probes/kprobes/core.c
@@ -266,11 +266,20 @@ void __kprobes kprobe_handler(struct pt_regs *regs)
 #endif
 
     if (p) {
-        if (cur) {
+        if (!p->ainsn.insn_check_cc(regs->ARM_cpsr)) {
+            /*
+             * Probe hit but conditional execution check failed,
+             * so just skip the instruction and continue as if
+             * nothing had happened.
+             * In this case, we can skip recursing check too.
+             */
+            singlestep_skip(p, regs);
+        } else if (cur) {
             /* Kprobe is pending, so we're recursing. */
             switch (kcb->kprobe_status) {
             case KPROBE_HIT_ACTIVE:
             case KPROBE_HIT_SSDONE:
+            case KPROBE_HIT_SS:
                 /* A pre- or post-handler probe got us here. */
                 kprobes_inc_nmissed_count(p);
                 save_previous_kprobe(kcb);
@@ -279,11 +288,16 @@ void __kprobes kprobe_handler(struct pt_regs *regs)
                 singlestep(p, regs, kcb);
                 restore_previous_kprobe(kcb);
                 break;
+            case KPROBE_REENTER:
+                /* A nested probe was hit in FIQ, it is a BUG */
+                pr_warn("Unrecoverable kprobe detected at %p.\n",
+                    p->addr);
+                /* fall through */
             default:
                 /* impossible cases */
                 BUG();
             }
-        } else if (p->ainsn.insn_check_cc(regs->ARM_cpsr)) {
+        } else {
             /* Probe hit and conditional execution check ok. */
             set_current_kprobe(p);
             kcb->kprobe_status = KPROBE_HIT_ACTIVE;
@@ -304,13 +318,6 @@ void __kprobes kprobe_handler(struct pt_regs *regs)
             }
             reset_current_kprobe();
         }
-    } else {
-        /*
-         * Probe hit but conditional execution check failed,
-         * so just skip the instruction and continue as if
-         * nothing had happened.
-         */
-        singlestep_skip(p, regs);
     }
     } else if (cur) {
         /* We probably hit a jprobe. Call its break handler. */
@@ -434,6 +441,7 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
     struct hlist_node *tmp;
     unsigned long flags, orig_ret_address = 0;
     unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
+    kprobe_opcode_t *correct_ret_addr = NULL;
 
     INIT_HLIST_HEAD(&empty_rp);
     kretprobe_hash_lock(current, &head, &flags);
@@ -456,14 +464,34 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
             /* another task is sharing our hash bucket */
             continue;
 
+        orig_ret_address = (unsigned long)ri->ret_addr;
+
+        if (orig_ret_address != trampoline_address)
+            /*
+             * This is the real return address. Any other
+             * instances associated with this task are for
+             * other calls deeper on the call stack
+             */
+            break;
+    }
+
+    kretprobe_assert(ri, orig_ret_address, trampoline_address);
+
+    correct_ret_addr = ri->ret_addr;
+    hlist_for_each_entry_safe(ri, tmp, head, hlist) {
+        if (ri->task != current)
+            /* another task is sharing our hash bucket */
+            continue;
+
+        orig_ret_address = (unsigned long)ri->ret_addr;
         if (ri->rp && ri->rp->handler) {
             __this_cpu_write(current_kprobe, &ri->rp->kp);
             get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
+            ri->ret_addr = correct_ret_addr;
             ri->rp->handler(ri, regs);
             __this_cpu_write(current_kprobe, NULL);
         }
 
-        orig_ret_address = (unsigned long)ri->ret_addr;
         recycle_rp_inst(ri, &empty_rp);
 
         if (orig_ret_address != trampoline_address)
@@ -475,7 +503,6 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
             break;
         }
 
-    kretprobe_assert(ri, orig_ret_address, trampoline_address);
     kretprobe_hash_unlock(current, &flags);
 
     hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
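
The trampoline_handler() rework above turns one pass over the hash bucket into two: the first pass only finds the real return address (the first instance whose saved address is not the trampoline), and the second pass runs the handlers with ri->ret_addr corrected before each call, so handlers observe the true return target. A standalone toy sketch of the two-pass idea; all names and values are made up:

    #include <stdio.h>

    #define TRAMPOLINE 0xdeadbeefUL

    struct inst { unsigned long ret_addr; };

    static void two_pass(struct inst *v, int n)
    {
        unsigned long correct = TRAMPOLINE;
        int i, last = n - 1;

        for (i = 0; i < n; i++) {        /* pass 1: find the boundary */
            correct = v[i].ret_addr;
            last = i;
            if (correct != TRAMPOLINE)   /* the real return address */
                break;
        }

        for (i = 0; i <= last; i++) {    /* pass 2: handlers see the fix */
            v[i].ret_addr = correct;
            printf("handler sees %#lx\n", v[i].ret_addr);
        }
    }

    int main(void)
    {
        struct inst v[] = { { TRAMPOLINE }, { 0x8010203cUL } };
        two_pass(v, 2);
        return 0;
    }
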
diff --git a/arch/arm/probes/kprobes/test-core.c b/arch/arm/probes/kprobes/test-core.c
index c893726aa52d..1c98a87786ca 100644
--- a/arch/arm/probes/kprobes/test-core.c
+++ b/arch/arm/probes/kprobes/test-core.c
@@ -977,7 +977,10 @@ static void coverage_end(void)
 void __naked __kprobes_test_case_start(void)
 {
     __asm__ __volatile__ (
-    "stmdb  sp!, {r4-r11}   \n\t"
+    "mov    r2, sp          \n\t"
+    "bic    r3, r2, #7      \n\t"
+    "mov    sp, r3          \n\t"
+    "stmdb  sp!, {r2-r11}   \n\t"
     "sub    sp, sp, #"__stringify(TEST_MEMORY_SIZE)"\n\t"
     "bic    r0, lr, #1  @ r0 = inline data  \n\t"
     "mov    r1, sp          \n\t"
@@ -997,7 +1000,8 @@ void __naked __kprobes_test_case_end_32(void)
     "movne  pc, r0          \n\t"
     "mov    r0, r4          \n\t"
     "add    sp, sp, #"__stringify(TEST_MEMORY_SIZE)"\n\t"
-    "ldmia  sp!, {r4-r11}   \n\t"
+    "ldmia  sp!, {r2-r11}   \n\t"
+    "mov    sp, r2          \n\t"
     "mov    pc, r0          \n\t"
     );
 }
@@ -1013,7 +1017,8 @@ void __naked __kprobes_test_case_end_16(void)
     "bxne   r0              \n\t"
     "mov    r0, r4          \n\t"
     "add    sp, sp, #"__stringify(TEST_MEMORY_SIZE)"\n\t"
-    "ldmia  sp!, {r4-r11}   \n\t"
+    "ldmia  sp!, {r2-r11}   \n\t"
+    "mov    sp, r2          \n\t"
     "bx     r0              \n\t"
     );
 }
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
index 1c64ea2d23f9..0565779e66fa 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
@@ -179,8 +179,10 @@
     usbphy: phy@01c19400 {
         compatible = "allwinner,sun50i-a64-usb-phy";
         reg = <0x01c19400 0x14>,
+              <0x01c1a800 0x4>,
               <0x01c1b800 0x4>;
         reg-names = "phy_ctrl",
+                "pmu0",
                 "pmu1";
         clocks = <&ccu CLK_USB_PHY0>,
              <&ccu CLK_USB_PHY1>;
diff --git a/arch/arm64/include/asm/current.h b/arch/arm64/include/asm/current.h
index 86c404171305..f6580d4afb0e 100644
--- a/arch/arm64/include/asm/current.h
+++ b/arch/arm64/include/asm/current.h
@@ -3,8 +3,6 @@
 
 #include <linux/compiler.h>
 
-#include <asm/sysreg.h>
-
 #ifndef __ASSEMBLY__
 
 struct task_struct;
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index ef1caae02110..9b1036570586 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -944,7 +944,7 @@ static bool have_cpu_die(void)
 #ifdef CONFIG_HOTPLUG_CPU
     int any_cpu = raw_smp_processor_id();
 
-    if (cpu_ops[any_cpu]->cpu_die)
+    if (cpu_ops[any_cpu] && cpu_ops[any_cpu]->cpu_die)
         return true;
 #endif
     return false;
diff --git a/arch/arm64/kernel/vdso/.gitignore b/arch/arm64/kernel/vdso/.gitignore
index b8cc94e9698b..f8b69d84238e 100644
--- a/arch/arm64/kernel/vdso/.gitignore
+++ b/arch/arm64/kernel/vdso/.gitignore
@@ -1,2 +1 @@
 vdso.lds
-vdso-offsets.h
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 4bf899fb451b..1b35b8bddbfb 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -42,7 +42,20 @@
42#include <asm/pgtable.h> 42#include <asm/pgtable.h>
43#include <asm/tlbflush.h> 43#include <asm/tlbflush.h>
44 44
45static const char *fault_name(unsigned int esr); 45struct fault_info {
46 int (*fn)(unsigned long addr, unsigned int esr,
47 struct pt_regs *regs);
48 int sig;
49 int code;
50 const char *name;
51};
52
53static const struct fault_info fault_info[];
54
55static inline const struct fault_info *esr_to_fault_info(unsigned int esr)
56{
57 return fault_info + (esr & 63);
58}
46 59
47#ifdef CONFIG_KPROBES 60#ifdef CONFIG_KPROBES
48static inline int notify_page_fault(struct pt_regs *regs, unsigned int esr) 61static inline int notify_page_fault(struct pt_regs *regs, unsigned int esr)
@@ -197,10 +210,12 @@ static void __do_user_fault(struct task_struct *tsk, unsigned long addr,
                 struct pt_regs *regs)
 {
     struct siginfo si;
+    const struct fault_info *inf;
 
     if (unhandled_signal(tsk, sig) && show_unhandled_signals_ratelimited()) {
+        inf = esr_to_fault_info(esr);
         pr_info("%s[%d]: unhandled %s (%d) at 0x%08lx, esr 0x%03x\n",
-            tsk->comm, task_pid_nr(tsk), fault_name(esr), sig,
+            tsk->comm, task_pid_nr(tsk), inf->name, sig,
             addr, esr);
         show_pte(tsk->mm, addr);
         show_regs(regs);
@@ -219,14 +234,16 @@ static void do_bad_area(unsigned long addr, unsigned int esr, struct pt_regs *re
 {
     struct task_struct *tsk = current;
     struct mm_struct *mm = tsk->active_mm;
+    const struct fault_info *inf;
 
     /*
      * If we are in kernel mode at this point, we have no context to
      * handle this fault with.
      */
-    if (user_mode(regs))
-        __do_user_fault(tsk, addr, esr, SIGSEGV, SEGV_MAPERR, regs);
-    else
+    if (user_mode(regs)) {
+        inf = esr_to_fault_info(esr);
+        __do_user_fault(tsk, addr, esr, inf->sig, inf->code, regs);
+    } else
         __do_kernel_fault(mm, addr, esr, regs);
 }
 
@@ -488,12 +505,7 @@ static int do_bad(unsigned long addr, unsigned int esr, struct pt_regs *regs)
     return 1;
 }
 
-static const struct fault_info {
-    int (*fn)(unsigned long addr, unsigned int esr, struct pt_regs *regs);
-    int sig;
-    int code;
-    const char *name;
-} fault_info[] = {
+static const struct fault_info fault_info[] = {
     { do_bad, SIGBUS, 0, "ttbr address size fault" },
     { do_bad, SIGBUS, 0, "level 1 address size fault" },
     { do_bad, SIGBUS, 0, "level 2 address size fault" },
@@ -560,19 +572,13 @@ static const struct fault_info {
     { do_bad, SIGBUS, 0, "unknown 63" },
 };
 
-static const char *fault_name(unsigned int esr)
-{
-    const struct fault_info *inf = fault_info + (esr & 63);
-    return inf->name;
-}
-
 /*
  * Dispatch a data abort to the relevant handler.
  */
 asmlinkage void __exception do_mem_abort(unsigned long addr, unsigned int esr,
                      struct pt_regs *regs)
 {
-    const struct fault_info *inf = fault_info + (esr & 63);
+    const struct fault_info *inf = esr_to_fault_info(esr);
     struct siginfo info;
 
     if (!inf->fn(addr, esr, regs))
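
After this refactor every consumer derives the signal number, si_code and name from the same 64-entry table, indexed by the fault status code in the low six bits of the ESR. A standalone sketch of the lookup pattern with made-up table contents:

    #include <stdio.h>

    struct fault_entry {
        int sig;            /* signal to deliver */
        const char *name;   /* human-readable description */
    };

    /* Illustrative entries; the real ones live in fault_info[]. */
    static const struct fault_entry table[64] = {
        [0] = { 7,  "address size fault" },        /* SIGBUS  */
        [4] = { 11, "level 0 translation fault" }, /* SIGSEGV */
    };

    static const struct fault_entry *lookup(unsigned int esr)
    {
        return &table[esr & 63];  /* mirrors esr_to_fault_info() */
    }

    int main(void)
    {
        const struct fault_entry *e = lookup(4);
        printf("%s -> signal %d\n", e->name, e->sig);
        return 0;
    }
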
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index e25584d72396..7514a000e361 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -294,10 +294,6 @@ static __init int setup_hugepagesz(char *opt)
         hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
     } else if (ps == PUD_SIZE) {
         hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
-    } else if (ps == (PAGE_SIZE * CONT_PTES)) {
-        hugetlb_add_hstate(CONT_PTE_SHIFT);
-    } else if (ps == (PMD_SIZE * CONT_PMDS)) {
-        hugetlb_add_hstate((PMD_SHIFT + CONT_PMD_SHIFT) - PAGE_SHIFT);
     } else {
         hugetlb_bad_size();
         pr_err("hugepagesz: Unsupported page size %lu K\n", ps >> 10);
@@ -306,13 +302,3 @@ static __init int setup_hugepagesz(char *opt)
     return 1;
 }
 __setup("hugepagesz=", setup_hugepagesz);
-
-#ifdef CONFIG_ARM64_64K_PAGES
-static __init int add_default_hugepagesz(void)
-{
-    if (size_to_hstate(CONT_PTES * PAGE_SIZE) == NULL)
-        hugetlb_add_hstate(CONT_PTE_SHIFT);
-    return 0;
-}
-arch_initcall(add_default_hugepagesz);
-#endif
diff --git a/arch/c6x/kernel/ptrace.c b/arch/c6x/kernel/ptrace.c
index a27e1f02ce18..8801dc98fd44 100644
--- a/arch/c6x/kernel/ptrace.c
+++ b/arch/c6x/kernel/ptrace.c
@@ -70,46 +70,6 @@ static int gpr_get(struct task_struct *target,
                0, sizeof(*regs));
 }
 
-static int gpr_set(struct task_struct *target,
-           const struct user_regset *regset,
-           unsigned int pos, unsigned int count,
-           const void *kbuf, const void __user *ubuf)
-{
-    int ret;
-    struct pt_regs *regs = task_pt_regs(target);
-
-    /* Don't copyin TSR or CSR */
-    ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
-                 &regs,
-                 0, PT_TSR * sizeof(long));
-    if (ret)
-        return ret;
-
-    ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
-                    PT_TSR * sizeof(long),
-                    (PT_TSR + 1) * sizeof(long));
-    if (ret)
-        return ret;
-
-    ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
-                 &regs,
-                 (PT_TSR + 1) * sizeof(long),
-                 PT_CSR * sizeof(long));
-    if (ret)
-        return ret;
-
-    ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
-                    PT_CSR * sizeof(long),
-                    (PT_CSR + 1) * sizeof(long));
-    if (ret)
-        return ret;
-
-    ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
-                 &regs,
-                 (PT_CSR + 1) * sizeof(long), -1);
-    return ret;
-}
-
 enum c6x_regset {
     REGSET_GPR,
 };
@@ -121,7 +81,6 @@ static const struct user_regset c6x_regsets[] = {
         .size = sizeof(u32),
         .align = sizeof(u32),
         .get = gpr_get,
-        .set = gpr_set
     },
 };
 
diff --git a/arch/h8300/kernel/ptrace.c b/arch/h8300/kernel/ptrace.c
index 92075544a19a..0dc1c8f622bc 100644
--- a/arch/h8300/kernel/ptrace.c
+++ b/arch/h8300/kernel/ptrace.c
@@ -95,7 +95,8 @@ static int regs_get(struct task_struct *target,
     long *reg = (long *)&regs;
 
     /* build user regs in buffer */
-    for (r = 0; r < ARRAY_SIZE(register_offset); r++)
+    BUILD_BUG_ON(sizeof(regs) % sizeof(long) != 0);
+    for (r = 0; r < sizeof(regs) / sizeof(long); r++)
         *reg++ = h8300_get_reg(target, r);
 
     return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
@@ -113,7 +114,8 @@ static int regs_set(struct task_struct *target,
     long *reg;
 
     /* build user regs in buffer */
-    for (reg = (long *)&regs, r = 0; r < ARRAY_SIZE(register_offset); r++)
+    BUILD_BUG_ON(sizeof(regs) % sizeof(long) != 0);
+    for (reg = (long *)&regs, r = 0; r < sizeof(regs) / sizeof(long); r++)
         *reg++ = h8300_get_reg(target, r);
 
     ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
@@ -122,7 +124,7 @@ static int regs_set(struct task_struct *target,
         return ret;
 
     /* write back to pt_regs */
-    for (reg = (long *)&regs, r = 0; r < ARRAY_SIZE(register_offset); r++)
+    for (reg = (long *)&regs, r = 0; r < sizeof(regs) / sizeof(long); r++)
         h8300_put_reg(target, r, *reg++);
     return 0;
 }
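
Sizing the loop by sizeof(regs)/sizeof(long) removes the dependence on a separate register_offset[] table whose length could drift from the structure, and the BUILD_BUG_ON turns the divisibility assumption into a compile-time failure. A kernel-style sketch with a hypothetical register structure:

    #include <linux/bug.h>

    struct regs_sketch { long er[8]; long pc; long ccr; };  /* hypothetical */

    static void visit_all_regs(void)
    {
        unsigned int r;

        /* Build fails if the struct is not a whole number of longs. */
        BUILD_BUG_ON(sizeof(struct regs_sketch) % sizeof(long) != 0);

        for (r = 0; r < sizeof(struct regs_sketch) / sizeof(long); r++)
            ;  /* visit register index r */
    }
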
diff --git a/arch/ia64/include/asm/asm-prototypes.h b/arch/ia64/include/asm/asm-prototypes.h
new file mode 100644
index 000000000000..a2c139808cfe
--- /dev/null
+++ b/arch/ia64/include/asm/asm-prototypes.h
@@ -0,0 +1,29 @@
+#ifndef _ASM_IA64_ASM_PROTOTYPES_H
+#define _ASM_IA64_ASM_PROTOTYPES_H
+
+#include <asm/cacheflush.h>
+#include <asm/checksum.h>
+#include <asm/esi.h>
+#include <asm/ftrace.h>
+#include <asm/page.h>
+#include <asm/pal.h>
+#include <asm/string.h>
+#include <asm/uaccess.h>
+#include <asm/unwind.h>
+#include <asm/xor.h>
+
+extern const char ia64_ivt[];
+
+signed int __divsi3(signed int, unsigned int);
+signed int __modsi3(signed int, unsigned int);
+
+signed long long __divdi3(signed long long, unsigned long long);
+signed long long __moddi3(signed long long, unsigned long long);
+
+unsigned int __udivsi3(unsigned int, unsigned int);
+unsigned int __umodsi3(unsigned int, unsigned int);
+
+unsigned long long __udivdi3(unsigned long long, unsigned long long);
+unsigned long long __umoddi3(unsigned long long, unsigned long long);
+
+#endif /* _ASM_IA64_ASM_PROTOTYPES_H */
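
asm-prototypes.h is the kbuild convention that gives genksyms C declarations for symbols defined and exported from assembly, so CONFIG_MODVERSIONS can compute CRCs for them; here it covers the ia64 libgcc-style division helpers built from idiv32.S/idiv64.S. The shape of the convention, with a hypothetical symbol:

    /* arch/<arch>/include/asm/asm-prototypes.h (sketch) */
    #include <linux/types.h>

    /*
     * my_asm_helper is hypothetical: defined in a .S file, which also
     * carries its EXPORT_SYMBOL(); this header only supplies the C
     * prototype that genksyms needs for symbol versioning.
     */
    long my_asm_helper(long a, long b);
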
diff --git a/arch/ia64/lib/Makefile b/arch/ia64/lib/Makefile
index 1f3d3877618f..0a40b14407b1 100644
--- a/arch/ia64/lib/Makefile
+++ b/arch/ia64/lib/Makefile
@@ -24,25 +24,25 @@ AFLAGS___modsi3.o = -DMODULO
 AFLAGS___umodsi3.o = -DUNSIGNED -DMODULO
 
 $(obj)/__divdi3.o: $(src)/idiv64.S FORCE
-	$(call if_changed_dep,as_o_S)
+	$(call if_changed_rule,as_o_S)
 
 $(obj)/__udivdi3.o: $(src)/idiv64.S FORCE
-	$(call if_changed_dep,as_o_S)
+	$(call if_changed_rule,as_o_S)
 
 $(obj)/__moddi3.o: $(src)/idiv64.S FORCE
-	$(call if_changed_dep,as_o_S)
+	$(call if_changed_rule,as_o_S)
 
 $(obj)/__umoddi3.o: $(src)/idiv64.S FORCE
-	$(call if_changed_dep,as_o_S)
+	$(call if_changed_rule,as_o_S)
 
 $(obj)/__divsi3.o: $(src)/idiv32.S FORCE
-	$(call if_changed_dep,as_o_S)
+	$(call if_changed_rule,as_o_S)
 
 $(obj)/__udivsi3.o: $(src)/idiv32.S FORCE
-	$(call if_changed_dep,as_o_S)
+	$(call if_changed_rule,as_o_S)
 
 $(obj)/__modsi3.o: $(src)/idiv32.S FORCE
-	$(call if_changed_dep,as_o_S)
+	$(call if_changed_rule,as_o_S)
 
 $(obj)/__umodsi3.o: $(src)/idiv32.S FORCE
-	$(call if_changed_dep,as_o_S)
+	$(call if_changed_rule,as_o_S)
diff --git a/arch/m68k/configs/amiga_defconfig b/arch/m68k/configs/amiga_defconfig
index 048bf076f7df..531cb9eb3319 100644
--- a/arch/m68k/configs/amiga_defconfig
+++ b/arch/m68k/configs/amiga_defconfig
@@ -25,6 +25,7 @@ CONFIG_SUN_PARTITION=y
 # CONFIG_EFI_PARTITION is not set
 CONFIG_SYSV68_PARTITION=y
 CONFIG_IOSCHED_DEADLINE=m
+CONFIG_MQ_IOSCHED_DEADLINE=m
 CONFIG_KEXEC=y
 CONFIG_BOOTINFO_PROC=y
 CONFIG_M68020=y
@@ -60,6 +61,7 @@ CONFIG_NET_IPVTI=m
 CONFIG_NET_FOU_IP_TUNNELS=y
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
+CONFIG_INET_ESP_OFFLOAD=m
 CONFIG_INET_IPCOMP=m
 CONFIG_INET_XFRM_MODE_TRANSPORT=m
 CONFIG_INET_XFRM_MODE_TUNNEL=m
@@ -71,6 +73,7 @@ CONFIG_IPV6=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
+CONFIG_INET6_ESP_OFFLOAD=m
 CONFIG_INET6_IPCOMP=m
 CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
@@ -101,6 +104,7 @@ CONFIG_NFT_NUMGEN=m
 CONFIG_NFT_CT=m
 CONFIG_NFT_SET_RBTREE=m
 CONFIG_NFT_SET_HASH=m
+CONFIG_NFT_SET_BITMAP=m
 CONFIG_NFT_COUNTER=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
@@ -298,6 +302,8 @@ CONFIG_MPLS_IPTUNNEL=m
 CONFIG_NET_L3_MASTER_DEV=y
 CONFIG_AF_KCM=m
 # CONFIG_WIRELESS is not set
+CONFIG_PSAMPLE=m
+CONFIG_NET_IFE=m
 CONFIG_NET_DEVLINK=m
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
@@ -371,6 +377,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m
 CONFIG_MACVLAN=m
 CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
+CONFIG_IPVTAP=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
 CONFIG_GTP=m
@@ -383,6 +390,7 @@ CONFIG_VETH=m
 # CONFIG_NET_VENDOR_AMAZON is not set
 CONFIG_A2065=y
 CONFIG_ARIADNE=y
+# CONFIG_NET_VENDOR_AQUANTIA is not set
 # CONFIG_NET_VENDOR_ARC is not set
 # CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_BROADCOM is not set
@@ -404,7 +412,6 @@ CONFIG_ZORRO8390=y
 # CONFIG_NET_VENDOR_SOLARFLARE is not set
 # CONFIG_NET_VENDOR_SMSC is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PPP=m
@@ -564,6 +571,8 @@ CONFIG_NLS_MAC_TURKISH=m
 CONFIG_DLM=m
 # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
 CONFIG_MAGIC_SYSRQ=y
+CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_ATOMIC64_SELFTEST=m
 CONFIG_ASYNC_RAID6_TEST=m
 CONFIG_TEST_HEXDUMP=m
 CONFIG_TEST_STRING_HELPERS=m
@@ -594,6 +603,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
 CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_CMAC=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
@@ -605,6 +615,7 @@ CONFIG_CRYPTO_SHA512=m
 CONFIG_CRYPTO_SHA3=m
 CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_AES_TI=m
 CONFIG_CRYPTO_ANUBIS=m
 CONFIG_CRYPTO_BLOWFISH=m
 CONFIG_CRYPTO_CAMELLIA=m
@@ -629,4 +640,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
 CONFIG_CRYPTO_USER_API_RNG=m
 CONFIG_CRYPTO_USER_API_AEAD=m
 # CONFIG_CRYPTO_HW is not set
+CONFIG_CRC32_SELFTEST=m
 CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/apollo_defconfig b/arch/m68k/configs/apollo_defconfig
index d4de24963f5f..ca91d39555da 100644
--- a/arch/m68k/configs/apollo_defconfig
+++ b/arch/m68k/configs/apollo_defconfig
@@ -26,6 +26,7 @@ CONFIG_SUN_PARTITION=y
 # CONFIG_EFI_PARTITION is not set
 CONFIG_SYSV68_PARTITION=y
 CONFIG_IOSCHED_DEADLINE=m
+CONFIG_MQ_IOSCHED_DEADLINE=m
 CONFIG_KEXEC=y
 CONFIG_BOOTINFO_PROC=y
 CONFIG_M68020=y
@@ -58,6 +59,7 @@ CONFIG_NET_IPVTI=m
 CONFIG_NET_FOU_IP_TUNNELS=y
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
+CONFIG_INET_ESP_OFFLOAD=m
 CONFIG_INET_IPCOMP=m
 CONFIG_INET_XFRM_MODE_TRANSPORT=m
 CONFIG_INET_XFRM_MODE_TUNNEL=m
@@ -69,6 +71,7 @@ CONFIG_IPV6=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
+CONFIG_INET6_ESP_OFFLOAD=m
 CONFIG_INET6_IPCOMP=m
 CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
@@ -99,6 +102,7 @@ CONFIG_NFT_NUMGEN=m
 CONFIG_NFT_CT=m
 CONFIG_NFT_SET_RBTREE=m
 CONFIG_NFT_SET_HASH=m
+CONFIG_NFT_SET_BITMAP=m
 CONFIG_NFT_COUNTER=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
@@ -296,6 +300,8 @@ CONFIG_MPLS_IPTUNNEL=m
 CONFIG_NET_L3_MASTER_DEV=y
 CONFIG_AF_KCM=m
 # CONFIG_WIRELESS is not set
+CONFIG_PSAMPLE=m
+CONFIG_NET_IFE=m
 CONFIG_NET_DEVLINK=m
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
@@ -353,6 +359,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m
 CONFIG_MACVLAN=m
 CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
+CONFIG_IPVTAP=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
 CONFIG_GTP=m
@@ -362,6 +369,7 @@ CONFIG_NETCONSOLE_DYNAMIC=y
 CONFIG_VETH=m
 # CONFIG_NET_VENDOR_ALACRITECH is not set
 # CONFIG_NET_VENDOR_AMAZON is not set
+# CONFIG_NET_VENDOR_AQUANTIA is not set
 # CONFIG_NET_VENDOR_ARC is not set
 # CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_BROADCOM is not set
@@ -378,7 +386,6 @@ CONFIG_VETH=m
 # CONFIG_NET_VENDOR_SEEQ is not set
 # CONFIG_NET_VENDOR_SOLARFLARE is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PPP=m
@@ -523,6 +530,8 @@ CONFIG_NLS_MAC_TURKISH=m
 CONFIG_DLM=m
 # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
 CONFIG_MAGIC_SYSRQ=y
+CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_ATOMIC64_SELFTEST=m
 CONFIG_ASYNC_RAID6_TEST=m
 CONFIG_TEST_HEXDUMP=m
 CONFIG_TEST_STRING_HELPERS=m
@@ -553,6 +562,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
 CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_CMAC=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
@@ -564,6 +574,7 @@ CONFIG_CRYPTO_SHA512=m
 CONFIG_CRYPTO_SHA3=m
 CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_AES_TI=m
 CONFIG_CRYPTO_ANUBIS=m
 CONFIG_CRYPTO_BLOWFISH=m
 CONFIG_CRYPTO_CAMELLIA=m
@@ -588,4 +599,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
 CONFIG_CRYPTO_USER_API_RNG=m
 CONFIG_CRYPTO_USER_API_AEAD=m
 # CONFIG_CRYPTO_HW is not set
+CONFIG_CRC32_SELFTEST=m
 CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/atari_defconfig b/arch/m68k/configs/atari_defconfig
index fc0fd3f871f3..23a3d8a691e2 100644
--- a/arch/m68k/configs/atari_defconfig
+++ b/arch/m68k/configs/atari_defconfig
@@ -25,6 +25,7 @@ CONFIG_SUN_PARTITION=y
 # CONFIG_EFI_PARTITION is not set
 CONFIG_SYSV68_PARTITION=y
 CONFIG_IOSCHED_DEADLINE=m
+CONFIG_MQ_IOSCHED_DEADLINE=m
 CONFIG_KEXEC=y
 CONFIG_BOOTINFO_PROC=y
 CONFIG_M68020=y
@@ -58,6 +59,7 @@ CONFIG_NET_IPVTI=m
 CONFIG_NET_FOU_IP_TUNNELS=y
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
+CONFIG_INET_ESP_OFFLOAD=m
 CONFIG_INET_IPCOMP=m
 CONFIG_INET_XFRM_MODE_TRANSPORT=m
 CONFIG_INET_XFRM_MODE_TUNNEL=m
@@ -69,6 +71,7 @@ CONFIG_IPV6=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
+CONFIG_INET6_ESP_OFFLOAD=m
 CONFIG_INET6_IPCOMP=m
 CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
@@ -99,6 +102,7 @@ CONFIG_NFT_NUMGEN=m
 CONFIG_NFT_CT=m
 CONFIG_NFT_SET_RBTREE=m
 CONFIG_NFT_SET_HASH=m
+CONFIG_NFT_SET_BITMAP=m
 CONFIG_NFT_COUNTER=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
@@ -296,6 +300,8 @@ CONFIG_MPLS_IPTUNNEL=m
 CONFIG_NET_L3_MASTER_DEV=y
 CONFIG_AF_KCM=m
 # CONFIG_WIRELESS is not set
+CONFIG_PSAMPLE=m
+CONFIG_NET_IFE=m
 CONFIG_NET_DEVLINK=m
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
@@ -362,6 +368,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m
 CONFIG_MACVLAN=m
 CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
+CONFIG_IPVTAP=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
 CONFIG_GTP=m
@@ -372,6 +379,7 @@ CONFIG_VETH=m
 # CONFIG_NET_VENDOR_ALACRITECH is not set
 # CONFIG_NET_VENDOR_AMAZON is not set
 CONFIG_ATARILANCE=y
+# CONFIG_NET_VENDOR_AQUANTIA is not set
 # CONFIG_NET_VENDOR_ARC is not set
 # CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_BROADCOM is not set
@@ -389,7 +397,6 @@ CONFIG_NE2000=y
 # CONFIG_NET_VENDOR_SOLARFLARE is not set
 CONFIG_SMC91X=y
 # CONFIG_NET_VENDOR_STMICRO is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PPP=m
@@ -544,6 +551,8 @@ CONFIG_NLS_MAC_TURKISH=m
 CONFIG_DLM=m
 # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
 CONFIG_MAGIC_SYSRQ=y
+CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_ATOMIC64_SELFTEST=m
 CONFIG_ASYNC_RAID6_TEST=m
 CONFIG_TEST_HEXDUMP=m
 CONFIG_TEST_STRING_HELPERS=m
@@ -574,6 +583,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
 CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_CMAC=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
@@ -585,6 +595,7 @@ CONFIG_CRYPTO_SHA512=m
 CONFIG_CRYPTO_SHA3=m
 CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_AES_TI=m
 CONFIG_CRYPTO_ANUBIS=m
 CONFIG_CRYPTO_BLOWFISH=m
 CONFIG_CRYPTO_CAMELLIA=m
@@ -609,4 +620,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
 CONFIG_CRYPTO_USER_API_RNG=m
 CONFIG_CRYPTO_USER_API_AEAD=m
 # CONFIG_CRYPTO_HW is not set
+CONFIG_CRC32_SELFTEST=m
 CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/bvme6000_defconfig b/arch/m68k/configs/bvme6000_defconfig
index 52e984a0aa69..95deb95140fe 100644
--- a/arch/m68k/configs/bvme6000_defconfig
+++ b/arch/m68k/configs/bvme6000_defconfig
@@ -25,6 +25,7 @@ CONFIG_UNIXWARE_DISKLABEL=y
 CONFIG_SUN_PARTITION=y
 # CONFIG_EFI_PARTITION is not set
 CONFIG_IOSCHED_DEADLINE=m
+CONFIG_MQ_IOSCHED_DEADLINE=m
 CONFIG_KEXEC=y
 CONFIG_BOOTINFO_PROC=y
 CONFIG_M68040=y
@@ -56,6 +57,7 @@ CONFIG_NET_IPVTI=m
 CONFIG_NET_FOU_IP_TUNNELS=y
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
+CONFIG_INET_ESP_OFFLOAD=m
 CONFIG_INET_IPCOMP=m
 CONFIG_INET_XFRM_MODE_TRANSPORT=m
 CONFIG_INET_XFRM_MODE_TUNNEL=m
@@ -67,6 +69,7 @@ CONFIG_IPV6=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
+CONFIG_INET6_ESP_OFFLOAD=m
 CONFIG_INET6_IPCOMP=m
 CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
@@ -97,6 +100,7 @@ CONFIG_NFT_NUMGEN=m
 CONFIG_NFT_CT=m
 CONFIG_NFT_SET_RBTREE=m
 CONFIG_NFT_SET_HASH=m
+CONFIG_NFT_SET_BITMAP=m
 CONFIG_NFT_COUNTER=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
@@ -294,6 +298,8 @@ CONFIG_MPLS_IPTUNNEL=m
 CONFIG_NET_L3_MASTER_DEV=y
 CONFIG_AF_KCM=m
 # CONFIG_WIRELESS is not set
+CONFIG_PSAMPLE=m
+CONFIG_NET_IFE=m
 CONFIG_NET_DEVLINK=m
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
@@ -352,6 +358,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m
 CONFIG_MACVLAN=m
 CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
+CONFIG_IPVTAP=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
 CONFIG_GTP=m
@@ -361,6 +368,7 @@ CONFIG_NETCONSOLE_DYNAMIC=y
 CONFIG_VETH=m
 # CONFIG_NET_VENDOR_ALACRITECH is not set
 # CONFIG_NET_VENDOR_AMAZON is not set
+# CONFIG_NET_VENDOR_AQUANTIA is not set
 # CONFIG_NET_VENDOR_ARC is not set
 # CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_BROADCOM is not set
@@ -377,7 +385,6 @@ CONFIG_BVME6000_NET=y
 # CONFIG_NET_VENDOR_SEEQ is not set
 # CONFIG_NET_VENDOR_SOLARFLARE is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PPP=m
@@ -515,6 +522,8 @@ CONFIG_NLS_MAC_TURKISH=m
 CONFIG_DLM=m
 # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
 CONFIG_MAGIC_SYSRQ=y
+CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_ATOMIC64_SELFTEST=m
 CONFIG_ASYNC_RAID6_TEST=m
 CONFIG_TEST_HEXDUMP=m
 CONFIG_TEST_STRING_HELPERS=m
@@ -545,6 +554,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
 CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_CMAC=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
@@ -556,6 +566,7 @@ CONFIG_CRYPTO_SHA512=m
 CONFIG_CRYPTO_SHA3=m
 CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_AES_TI=m
 CONFIG_CRYPTO_ANUBIS=m
 CONFIG_CRYPTO_BLOWFISH=m
 CONFIG_CRYPTO_CAMELLIA=m
@@ -580,4 +591,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
 CONFIG_CRYPTO_USER_API_RNG=m
 CONFIG_CRYPTO_USER_API_AEAD=m
 # CONFIG_CRYPTO_HW is not set
+CONFIG_CRC32_SELFTEST=m
 CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/hp300_defconfig b/arch/m68k/configs/hp300_defconfig
index aaeed4422cc9..afae6958db2d 100644
--- a/arch/m68k/configs/hp300_defconfig
+++ b/arch/m68k/configs/hp300_defconfig
@@ -26,6 +26,7 @@ CONFIG_SUN_PARTITION=y
 # CONFIG_EFI_PARTITION is not set
 CONFIG_SYSV68_PARTITION=y
 CONFIG_IOSCHED_DEADLINE=m
+CONFIG_MQ_IOSCHED_DEADLINE=m
 CONFIG_KEXEC=y
 CONFIG_BOOTINFO_PROC=y
 CONFIG_M68020=y
@@ -58,6 +59,7 @@ CONFIG_NET_IPVTI=m
 CONFIG_NET_FOU_IP_TUNNELS=y
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
+CONFIG_INET_ESP_OFFLOAD=m
 CONFIG_INET_IPCOMP=m
 CONFIG_INET_XFRM_MODE_TRANSPORT=m
 CONFIG_INET_XFRM_MODE_TUNNEL=m
@@ -69,6 +71,7 @@ CONFIG_IPV6=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
+CONFIG_INET6_ESP_OFFLOAD=m
 CONFIG_INET6_IPCOMP=m
 CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
@@ -99,6 +102,7 @@ CONFIG_NFT_NUMGEN=m
 CONFIG_NFT_CT=m
 CONFIG_NFT_SET_RBTREE=m
 CONFIG_NFT_SET_HASH=m
+CONFIG_NFT_SET_BITMAP=m
 CONFIG_NFT_COUNTER=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
@@ -296,6 +300,8 @@ CONFIG_MPLS_IPTUNNEL=m
 CONFIG_NET_L3_MASTER_DEV=y
 CONFIG_AF_KCM=m
 # CONFIG_WIRELESS is not set
+CONFIG_PSAMPLE=m
+CONFIG_NET_IFE=m
 CONFIG_NET_DEVLINK=m
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
@@ -353,6 +359,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m
 CONFIG_MACVLAN=m
 CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
+CONFIG_IPVTAP=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
 CONFIG_GTP=m
@@ -363,6 +370,7 @@ CONFIG_VETH=m
 # CONFIG_NET_VENDOR_ALACRITECH is not set
 # CONFIG_NET_VENDOR_AMAZON is not set
 CONFIG_HPLANCE=y
+# CONFIG_NET_VENDOR_AQUANTIA is not set
 # CONFIG_NET_VENDOR_ARC is not set
 # CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_BROADCOM is not set
@@ -379,7 +387,6 @@ CONFIG_HPLANCE=y
 # CONFIG_NET_VENDOR_SEEQ is not set
 # CONFIG_NET_VENDOR_SOLARFLARE is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PPP=m
@@ -525,6 +532,8 @@ CONFIG_NLS_MAC_TURKISH=m
 CONFIG_DLM=m
 # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
 CONFIG_MAGIC_SYSRQ=y
+CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_ATOMIC64_SELFTEST=m
 CONFIG_ASYNC_RAID6_TEST=m
 CONFIG_TEST_HEXDUMP=m
 CONFIG_TEST_STRING_HELPERS=m
@@ -555,6 +564,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
 CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_CMAC=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
@@ -566,6 +576,7 @@ CONFIG_CRYPTO_SHA512=m
 CONFIG_CRYPTO_SHA3=m
 CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_AES_TI=m
 CONFIG_CRYPTO_ANUBIS=m
 CONFIG_CRYPTO_BLOWFISH=m
 CONFIG_CRYPTO_CAMELLIA=m
@@ -590,4 +601,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
 CONFIG_CRYPTO_USER_API_RNG=m
 CONFIG_CRYPTO_USER_API_AEAD=m
 # CONFIG_CRYPTO_HW is not set
+CONFIG_CRC32_SELFTEST=m
 CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/mac_defconfig b/arch/m68k/configs/mac_defconfig
index 3bbc9b2f0dac..b010734729a7 100644
--- a/arch/m68k/configs/mac_defconfig
+++ b/arch/m68k/configs/mac_defconfig
@@ -25,6 +25,7 @@ CONFIG_SUN_PARTITION=y
 # CONFIG_EFI_PARTITION is not set
 CONFIG_SYSV68_PARTITION=y
 CONFIG_IOSCHED_DEADLINE=m
+CONFIG_MQ_IOSCHED_DEADLINE=m
 CONFIG_KEXEC=y
 CONFIG_BOOTINFO_PROC=y
 CONFIG_M68020=y
@@ -57,6 +58,7 @@ CONFIG_NET_IPVTI=m
 CONFIG_NET_FOU_IP_TUNNELS=y
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
+CONFIG_INET_ESP_OFFLOAD=m
 CONFIG_INET_IPCOMP=m
 CONFIG_INET_XFRM_MODE_TRANSPORT=m
 CONFIG_INET_XFRM_MODE_TUNNEL=m
@@ -68,6 +70,7 @@ CONFIG_IPV6=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
+CONFIG_INET6_ESP_OFFLOAD=m
 CONFIG_INET6_IPCOMP=m
 CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
@@ -98,6 +101,7 @@ CONFIG_NFT_NUMGEN=m
 CONFIG_NFT_CT=m
 CONFIG_NFT_SET_RBTREE=m
 CONFIG_NFT_SET_HASH=m
+CONFIG_NFT_SET_BITMAP=m
 CONFIG_NFT_COUNTER=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
@@ -298,6 +302,8 @@ CONFIG_MPLS_IPTUNNEL=m
 CONFIG_NET_L3_MASTER_DEV=y
 CONFIG_AF_KCM=m
 # CONFIG_WIRELESS is not set
+CONFIG_PSAMPLE=m
+CONFIG_NET_IFE=m
 CONFIG_NET_DEVLINK=m
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
@@ -369,6 +375,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m
 CONFIG_MACVLAN=m
 CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
+CONFIG_IPVTAP=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
 CONFIG_GTP=m
@@ -379,6 +386,7 @@ CONFIG_VETH=m
 # CONFIG_NET_VENDOR_ALACRITECH is not set
 # CONFIG_NET_VENDOR_AMAZON is not set
 CONFIG_MACMACE=y
+# CONFIG_NET_VENDOR_AQUANTIA is not set
 # CONFIG_NET_VENDOR_ARC is not set
 # CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_BROADCOM is not set
@@ -398,7 +406,6 @@ CONFIG_MAC8390=y
 # CONFIG_NET_VENDOR_SOLARFLARE is not set
 # CONFIG_NET_VENDOR_SMSC is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PPP=m
@@ -547,6 +554,8 @@ CONFIG_NLS_MAC_TURKISH=m
 CONFIG_DLM=m
 # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
 CONFIG_MAGIC_SYSRQ=y
+CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_ATOMIC64_SELFTEST=m
 CONFIG_ASYNC_RAID6_TEST=m
 CONFIG_TEST_HEXDUMP=m
 CONFIG_TEST_STRING_HELPERS=m
@@ -577,6 +586,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
 CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_CMAC=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
@@ -588,6 +598,7 @@ CONFIG_CRYPTO_SHA512=m
 CONFIG_CRYPTO_SHA3=m
 CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_AES_TI=m
 CONFIG_CRYPTO_ANUBIS=m
 CONFIG_CRYPTO_BLOWFISH=m
 CONFIG_CRYPTO_CAMELLIA=m
@@ -612,4 +623,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
 CONFIG_CRYPTO_USER_API_RNG=m
 CONFIG_CRYPTO_USER_API_AEAD=m
 # CONFIG_CRYPTO_HW is not set
+CONFIG_CRC32_SELFTEST=m
 CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/multi_defconfig b/arch/m68k/configs/multi_defconfig
index 8f2c0decb2f8..0e414549b235 100644
--- a/arch/m68k/configs/multi_defconfig
+++ b/arch/m68k/configs/multi_defconfig
@@ -21,6 +21,7 @@ CONFIG_SOLARIS_X86_PARTITION=y
 CONFIG_UNIXWARE_DISKLABEL=y
 # CONFIG_EFI_PARTITION is not set
 CONFIG_IOSCHED_DEADLINE=m
+CONFIG_MQ_IOSCHED_DEADLINE=m
 CONFIG_KEXEC=y
 CONFIG_BOOTINFO_PROC=y
 CONFIG_M68020=y
@@ -67,6 +68,7 @@ CONFIG_NET_IPVTI=m
 CONFIG_NET_FOU_IP_TUNNELS=y
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
+CONFIG_INET_ESP_OFFLOAD=m
 CONFIG_INET_IPCOMP=m
 CONFIG_INET_XFRM_MODE_TRANSPORT=m
 CONFIG_INET_XFRM_MODE_TUNNEL=m
@@ -78,6 +80,7 @@ CONFIG_IPV6=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
+CONFIG_INET6_ESP_OFFLOAD=m
 CONFIG_INET6_IPCOMP=m
 CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
@@ -108,6 +111,7 @@ CONFIG_NFT_NUMGEN=m
 CONFIG_NFT_CT=m
 CONFIG_NFT_SET_RBTREE=m
 CONFIG_NFT_SET_HASH=m
+CONFIG_NFT_SET_BITMAP=m
 CONFIG_NFT_COUNTER=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
@@ -308,6 +312,8 @@ CONFIG_MPLS_IPTUNNEL=m
 CONFIG_NET_L3_MASTER_DEV=y
 CONFIG_AF_KCM=m
 # CONFIG_WIRELESS is not set
+CONFIG_PSAMPLE=m
+CONFIG_NET_IFE=m
 CONFIG_NET_DEVLINK=m
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
@@ -402,6 +408,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m
 CONFIG_MACVLAN=m
 CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
+CONFIG_IPVTAP=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
 CONFIG_GTP=m
@@ -419,6 +426,7 @@ CONFIG_HPLANCE=y
 CONFIG_MVME147_NET=y
 CONFIG_SUN3LANCE=y
 CONFIG_MACMACE=y
+# CONFIG_NET_VENDOR_AQUANTIA is not set
 # CONFIG_NET_VENDOR_ARC is not set
 # CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_BROADCOM is not set
@@ -444,7 +452,6 @@ CONFIG_ZORRO8390=y
 # CONFIG_NET_VENDOR_SOLARFLARE is not set
 CONFIG_SMC91X=y
 # CONFIG_NET_VENDOR_STMICRO is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PLIP=m
@@ -627,6 +634,8 @@ CONFIG_NLS_MAC_TURKISH=m
 CONFIG_DLM=m
 # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
 CONFIG_MAGIC_SYSRQ=y
+CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_ATOMIC64_SELFTEST=m
 CONFIG_ASYNC_RAID6_TEST=m
 CONFIG_TEST_HEXDUMP=m
 CONFIG_TEST_STRING_HELPERS=m
@@ -657,6 +666,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
 CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_CMAC=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
@@ -668,6 +678,7 @@ CONFIG_CRYPTO_SHA512=m
 CONFIG_CRYPTO_SHA3=m
 CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_AES_TI=m
 CONFIG_CRYPTO_ANUBIS=m
 CONFIG_CRYPTO_BLOWFISH=m
 CONFIG_CRYPTO_CAMELLIA=m
@@ -692,4 +703,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
 CONFIG_CRYPTO_USER_API_RNG=m
 CONFIG_CRYPTO_USER_API_AEAD=m
 # CONFIG_CRYPTO_HW is not set
+CONFIG_CRC32_SELFTEST=m
 CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/mvme147_defconfig b/arch/m68k/configs/mvme147_defconfig
index c743dd22e96f..b2e687a0ec3d 100644
--- a/arch/m68k/configs/mvme147_defconfig
+++ b/arch/m68k/configs/mvme147_defconfig
@@ -25,6 +25,7 @@ CONFIG_UNIXWARE_DISKLABEL=y
 CONFIG_SUN_PARTITION=y
 # CONFIG_EFI_PARTITION is not set
 CONFIG_IOSCHED_DEADLINE=m
+CONFIG_MQ_IOSCHED_DEADLINE=m
 CONFIG_KEXEC=y
 CONFIG_BOOTINFO_PROC=y
 CONFIG_M68030=y
@@ -55,6 +56,7 @@ CONFIG_NET_IPVTI=m
 CONFIG_NET_FOU_IP_TUNNELS=y
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
+CONFIG_INET_ESP_OFFLOAD=m
 CONFIG_INET_IPCOMP=m
 CONFIG_INET_XFRM_MODE_TRANSPORT=m
 CONFIG_INET_XFRM_MODE_TUNNEL=m
@@ -66,6 +68,7 @@ CONFIG_IPV6=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
+CONFIG_INET6_ESP_OFFLOAD=m
 CONFIG_INET6_IPCOMP=m
 CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
@@ -96,6 +99,7 @@ CONFIG_NFT_NUMGEN=m
 CONFIG_NFT_CT=m
 CONFIG_NFT_SET_RBTREE=m
 CONFIG_NFT_SET_HASH=m
+CONFIG_NFT_SET_BITMAP=m
 CONFIG_NFT_COUNTER=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
@@ -293,6 +297,8 @@ CONFIG_MPLS_IPTUNNEL=m
 CONFIG_NET_L3_MASTER_DEV=y
 CONFIG_AF_KCM=m
 # CONFIG_WIRELESS is not set
+CONFIG_PSAMPLE=m
+CONFIG_NET_IFE=m
 CONFIG_NET_DEVLINK=m
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
@@ -351,6 +357,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m
 CONFIG_MACVLAN=m
 CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
+CONFIG_IPVTAP=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
 CONFIG_GTP=m
@@ -361,6 +368,7 @@ CONFIG_VETH=m
 # CONFIG_NET_VENDOR_ALACRITECH is not set
 # CONFIG_NET_VENDOR_AMAZON is not set
 CONFIG_MVME147_NET=y
+# CONFIG_NET_VENDOR_AQUANTIA is not set
 # CONFIG_NET_VENDOR_ARC is not set
 # CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_BROADCOM is not set
@@ -377,7 +385,6 @@ CONFIG_MVME147_NET=y
 # CONFIG_NET_VENDOR_SEEQ is not set
 # CONFIG_NET_VENDOR_SOLARFLARE is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PPP=m
@@ -515,6 +522,8 @@ CONFIG_NLS_MAC_TURKISH=m
 CONFIG_DLM=m
 # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
 CONFIG_MAGIC_SYSRQ=y
+CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_ATOMIC64_SELFTEST=m
 CONFIG_ASYNC_RAID6_TEST=m
 CONFIG_TEST_HEXDUMP=m
 CONFIG_TEST_STRING_HELPERS=m
@@ -545,6 +554,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
 CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_CMAC=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
@@ -556,6 +566,7 @@ CONFIG_CRYPTO_SHA512=m
 CONFIG_CRYPTO_SHA3=m
 CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_AES_TI=m
 CONFIG_CRYPTO_ANUBIS=m
 CONFIG_CRYPTO_BLOWFISH=m
 CONFIG_CRYPTO_CAMELLIA=m
@@ -580,4 +591,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
 CONFIG_CRYPTO_USER_API_RNG=m
 CONFIG_CRYPTO_USER_API_AEAD=m
 # CONFIG_CRYPTO_HW is not set
+CONFIG_CRC32_SELFTEST=m
 CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/mvme16x_defconfig b/arch/m68k/configs/mvme16x_defconfig
index 2ccaca858f05..cbd8ee24d1bc 100644
--- a/arch/m68k/configs/mvme16x_defconfig
+++ b/arch/m68k/configs/mvme16x_defconfig
@@ -25,6 +25,7 @@ CONFIG_UNIXWARE_DISKLABEL=y
 CONFIG_SUN_PARTITION=y
 # CONFIG_EFI_PARTITION is not set
 CONFIG_IOSCHED_DEADLINE=m
+CONFIG_MQ_IOSCHED_DEADLINE=m
 CONFIG_KEXEC=y
 CONFIG_BOOTINFO_PROC=y
 CONFIG_M68040=y
@@ -56,6 +57,7 @@ CONFIG_NET_IPVTI=m
 CONFIG_NET_FOU_IP_TUNNELS=y
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
+CONFIG_INET_ESP_OFFLOAD=m
 CONFIG_INET_IPCOMP=m
 CONFIG_INET_XFRM_MODE_TRANSPORT=m
 CONFIG_INET_XFRM_MODE_TUNNEL=m
@@ -67,6 +69,7 @@ CONFIG_IPV6=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
+CONFIG_INET6_ESP_OFFLOAD=m
 CONFIG_INET6_IPCOMP=m
 CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
@@ -97,6 +100,7 @@ CONFIG_NFT_NUMGEN=m
 CONFIG_NFT_CT=m
 CONFIG_NFT_SET_RBTREE=m
 CONFIG_NFT_SET_HASH=m
+CONFIG_NFT_SET_BITMAP=m
 CONFIG_NFT_COUNTER=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
@@ -294,6 +298,8 @@ CONFIG_MPLS_IPTUNNEL=m
 CONFIG_NET_L3_MASTER_DEV=y
 CONFIG_AF_KCM=m
 # CONFIG_WIRELESS is not set
+CONFIG_PSAMPLE=m
+CONFIG_NET_IFE=m
 CONFIG_NET_DEVLINK=m
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
@@ -352,6 +358,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m
 CONFIG_MACVLAN=m
 CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
+CONFIG_IPVTAP=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
 CONFIG_GTP=m
@@ -361,6 +368,7 @@ CONFIG_NETCONSOLE_DYNAMIC=y
 CONFIG_VETH=m
 # CONFIG_NET_VENDOR_ALACRITECH is not set
 # CONFIG_NET_VENDOR_AMAZON is not set
+# CONFIG_NET_VENDOR_AQUANTIA is not set
 # CONFIG_NET_VENDOR_ARC is not set
 # CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_BROADCOM is not set
@@ -377,7 +385,6 @@ CONFIG_MVME16x_NET=y
 # CONFIG_NET_VENDOR_SEEQ is not set
 # CONFIG_NET_VENDOR_SOLARFLARE is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PPP=m
@@ -515,6 +522,8 @@ CONFIG_NLS_MAC_TURKISH=m
 CONFIG_DLM=m
 # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
 CONFIG_MAGIC_SYSRQ=y
+CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_ATOMIC64_SELFTEST=m
 CONFIG_ASYNC_RAID6_TEST=m
 CONFIG_TEST_HEXDUMP=m
 CONFIG_TEST_STRING_HELPERS=m
@@ -545,6 +554,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
 CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_CMAC=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
@@ -556,6 +566,7 @@ CONFIG_CRYPTO_SHA512=m
 CONFIG_CRYPTO_SHA3=m
 CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_AES_TI=m
 CONFIG_CRYPTO_ANUBIS=m
 CONFIG_CRYPTO_BLOWFISH=m
 CONFIG_CRYPTO_CAMELLIA=m
@@ -580,4 +591,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
 CONFIG_CRYPTO_USER_API_RNG=m
 CONFIG_CRYPTO_USER_API_AEAD=m
 # CONFIG_CRYPTO_HW is not set
+CONFIG_CRC32_SELFTEST=m
 CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/q40_defconfig b/arch/m68k/configs/q40_defconfig
index 5599f3fd5fcd..1e82cc944339 100644
--- a/arch/m68k/configs/q40_defconfig
+++ b/arch/m68k/configs/q40_defconfig
@@ -26,6 +26,7 @@ CONFIG_SUN_PARTITION=y
 # CONFIG_EFI_PARTITION is not set
 CONFIG_SYSV68_PARTITION=y
 CONFIG_IOSCHED_DEADLINE=m
+CONFIG_MQ_IOSCHED_DEADLINE=m
 CONFIG_KEXEC=y
 CONFIG_BOOTINFO_PROC=y
 CONFIG_M68040=y
@@ -56,6 +57,7 @@ CONFIG_NET_IPVTI=m
 CONFIG_NET_FOU_IP_TUNNELS=y
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
+CONFIG_INET_ESP_OFFLOAD=m
 CONFIG_INET_IPCOMP=m
 CONFIG_INET_XFRM_MODE_TRANSPORT=m
 CONFIG_INET_XFRM_MODE_TUNNEL=m
@@ -67,6 +69,7 @@ CONFIG_IPV6=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
+CONFIG_INET6_ESP_OFFLOAD=m
 CONFIG_INET6_IPCOMP=m
 CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
@@ -97,6 +100,7 @@ CONFIG_NFT_NUMGEN=m
 CONFIG_NFT_CT=m
 CONFIG_NFT_SET_RBTREE=m
 CONFIG_NFT_SET_HASH=m
+CONFIG_NFT_SET_BITMAP=m
 CONFIG_NFT_COUNTER=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
@@ -294,6 +298,8 @@ CONFIG_MPLS_IPTUNNEL=m
 CONFIG_NET_L3_MASTER_DEV=y
 CONFIG_AF_KCM=m
 # CONFIG_WIRELESS is not set
+CONFIG_PSAMPLE=m
+CONFIG_NET_IFE=m
 CONFIG_NET_DEVLINK=m
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
@@ -358,6 +364,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m
 CONFIG_MACVLAN=m
 CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
+CONFIG_IPVTAP=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
 CONFIG_GTP=m
@@ -369,6 +376,7 @@ CONFIG_VETH=m
 # CONFIG_NET_VENDOR_ALACRITECH is not set
 # CONFIG_NET_VENDOR_AMAZON is not set
 # CONFIG_NET_VENDOR_AMD is not set
+# CONFIG_NET_VENDOR_AQUANTIA is not set
 # CONFIG_NET_VENDOR_ARC is not set
 # CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_BROADCOM is not set
@@ -388,7 +396,6 @@ CONFIG_NE2000=y
 # CONFIG_NET_VENDOR_SOLARFLARE is not set
 # CONFIG_NET_VENDOR_SMSC is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PLIP=m
@@ -538,6 +545,8 @@ CONFIG_NLS_MAC_TURKISH=m
 CONFIG_DLM=m
 # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
 CONFIG_MAGIC_SYSRQ=y
+CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_ATOMIC64_SELFTEST=m
 CONFIG_ASYNC_RAID6_TEST=m
 CONFIG_TEST_HEXDUMP=m
 CONFIG_TEST_STRING_HELPERS=m
@@ -568,6 +577,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
 CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_CMAC=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
@@ -579,6 +589,7 @@ CONFIG_CRYPTO_SHA512=m
 CONFIG_CRYPTO_SHA3=m
 CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_AES_TI=m
 CONFIG_CRYPTO_ANUBIS=m
 CONFIG_CRYPTO_BLOWFISH=m
 CONFIG_CRYPTO_CAMELLIA=m
@@ -603,4 +614,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
 CONFIG_CRYPTO_USER_API_RNG=m
 CONFIG_CRYPTO_USER_API_AEAD=m
 # CONFIG_CRYPTO_HW is not set
+CONFIG_CRC32_SELFTEST=m
 CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/sun3_defconfig b/arch/m68k/configs/sun3_defconfig
index 313bf0a562ad..f9e77f57a972 100644
--- a/arch/m68k/configs/sun3_defconfig
+++ b/arch/m68k/configs/sun3_defconfig
@@ -25,6 +25,7 @@ CONFIG_UNIXWARE_DISKLABEL=y
 # CONFIG_EFI_PARTITION is not set
 CONFIG_SYSV68_PARTITION=y
 CONFIG_IOSCHED_DEADLINE=m
+CONFIG_MQ_IOSCHED_DEADLINE=m
 CONFIG_KEXEC=y
 CONFIG_BOOTINFO_PROC=y
 CONFIG_SUN3=y
@@ -53,6 +54,7 @@ CONFIG_NET_IPVTI=m
 CONFIG_NET_FOU_IP_TUNNELS=y
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
+CONFIG_INET_ESP_OFFLOAD=m
 CONFIG_INET_IPCOMP=m
 CONFIG_INET_XFRM_MODE_TRANSPORT=m
 CONFIG_INET_XFRM_MODE_TUNNEL=m
@@ -64,6 +66,7 @@ CONFIG_IPV6=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
+CONFIG_INET6_ESP_OFFLOAD=m
 CONFIG_INET6_IPCOMP=m
 CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
@@ -94,6 +97,7 @@ CONFIG_NFT_NUMGEN=m
 CONFIG_NFT_CT=m
 CONFIG_NFT_SET_RBTREE=m
 CONFIG_NFT_SET_HASH=m
+CONFIG_NFT_SET_BITMAP=m
 CONFIG_NFT_COUNTER=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
@@ -291,6 +295,8 @@ CONFIG_MPLS_IPTUNNEL=m
 CONFIG_NET_L3_MASTER_DEV=y
 CONFIG_AF_KCM=m
 # CONFIG_WIRELESS is not set
+CONFIG_PSAMPLE=m
+CONFIG_NET_IFE=m
 CONFIG_NET_DEVLINK=m
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
@@ -349,6 +355,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m
 CONFIG_MACVLAN=m
 CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
+CONFIG_IPVTAP=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
 CONFIG_GTP=m
@@ -359,6 +366,7 @@ CONFIG_VETH=m
 # CONFIG_NET_VENDOR_ALACRITECH is not set
 # CONFIG_NET_VENDOR_AMAZON is not set
 CONFIG_SUN3LANCE=y
+# CONFIG_NET_VENDOR_AQUANTIA is not set
 # CONFIG_NET_VENDOR_ARC is not set
 # CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_EZCHIP is not set
@@ -375,7 +383,6 @@ CONFIG_SUN3_82586=y
 # CONFIG_NET_VENDOR_SOLARFLARE is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
 # CONFIG_NET_VENDOR_SUN is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PPP=m
@@ -517,6 +524,8 @@ CONFIG_NLS_MAC_TURKISH=m
 CONFIG_DLM=m
 # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
 CONFIG_MAGIC_SYSRQ=y
+CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_ATOMIC64_SELFTEST=m
 CONFIG_ASYNC_RAID6_TEST=m
 CONFIG_TEST_HEXDUMP=m
 CONFIG_TEST_STRING_HELPERS=m
@@ -546,6 +555,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
 CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_CMAC=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
@@ -557,6 +567,7 @@ CONFIG_CRYPTO_SHA512=m
 CONFIG_CRYPTO_SHA3=m
 CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_AES_TI=m
 CONFIG_CRYPTO_ANUBIS=m
 CONFIG_CRYPTO_BLOWFISH=m
 CONFIG_CRYPTO_CAMELLIA=m
@@ -581,4 +592,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
 CONFIG_CRYPTO_USER_API_RNG=m
 CONFIG_CRYPTO_USER_API_AEAD=m
 # CONFIG_CRYPTO_HW is not set
+CONFIG_CRC32_SELFTEST=m
 CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/sun3x_defconfig b/arch/m68k/configs/sun3x_defconfig
index 38b61365f769..3c394fcfb368 100644
--- a/arch/m68k/configs/sun3x_defconfig
+++ b/arch/m68k/configs/sun3x_defconfig
@@ -25,6 +25,7 @@ CONFIG_UNIXWARE_DISKLABEL=y
 # CONFIG_EFI_PARTITION is not set
 CONFIG_SYSV68_PARTITION=y
 CONFIG_IOSCHED_DEADLINE=m
+CONFIG_MQ_IOSCHED_DEADLINE=m
 CONFIG_KEXEC=y
 CONFIG_BOOTINFO_PROC=y
 CONFIG_SUN3X=y
@@ -53,6 +54,7 @@ CONFIG_NET_IPVTI=m
 CONFIG_NET_FOU_IP_TUNNELS=y
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
+CONFIG_INET_ESP_OFFLOAD=m
 CONFIG_INET_IPCOMP=m
 CONFIG_INET_XFRM_MODE_TRANSPORT=m
 CONFIG_INET_XFRM_MODE_TUNNEL=m
@@ -64,6 +66,7 @@ CONFIG_IPV6=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
+CONFIG_INET6_ESP_OFFLOAD=m
 CONFIG_INET6_IPCOMP=m
 CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
@@ -94,6 +97,7 @@ CONFIG_NFT_NUMGEN=m
 CONFIG_NFT_CT=m
 CONFIG_NFT_SET_RBTREE=m
 CONFIG_NFT_SET_HASH=m
+CONFIG_NFT_SET_BITMAP=m
 CONFIG_NFT_COUNTER=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
@@ -291,6 +295,8 @@ CONFIG_MPLS_IPTUNNEL=m
 CONFIG_NET_L3_MASTER_DEV=y
 CONFIG_AF_KCM=m
 # CONFIG_WIRELESS is not set
+CONFIG_PSAMPLE=m
+CONFIG_NET_IFE=m
 CONFIG_NET_DEVLINK=m
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
@@ -349,6 +355,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m
 CONFIG_MACVLAN=m
 CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
+CONFIG_IPVTAP=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
 CONFIG_GTP=m
@@ -359,6 +366,7 @@ CONFIG_VETH=m
 # CONFIG_NET_VENDOR_ALACRITECH is not set
 # CONFIG_NET_VENDOR_AMAZON is not set
 CONFIG_SUN3LANCE=y
+# CONFIG_NET_VENDOR_AQUANTIA is not set
 # CONFIG_NET_VENDOR_ARC is not set
 # CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_BROADCOM is not set
@@ -375,7 +383,6 @@ CONFIG_SUN3LANCE=y
 # CONFIG_NET_VENDOR_SEEQ is not set
 # CONFIG_NET_VENDOR_SOLARFLARE is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PPP=m
@@ -517,6 +524,8 @@ CONFIG_NLS_MAC_TURKISH=m
 CONFIG_DLM=m
 # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
 CONFIG_MAGIC_SYSRQ=y
+CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_ATOMIC64_SELFTEST=m
 CONFIG_ASYNC_RAID6_TEST=m
 CONFIG_TEST_HEXDUMP=m
 CONFIG_TEST_STRING_HELPERS=m
@@ -547,6 +556,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
 CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_CMAC=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
@@ -558,6 +568,7 @@ CONFIG_CRYPTO_SHA512=m
 CONFIG_CRYPTO_SHA3=m
 CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_AES_TI=m
 CONFIG_CRYPTO_ANUBIS=m
 CONFIG_CRYPTO_BLOWFISH=m
 CONFIG_CRYPTO_CAMELLIA=m
@@ -582,4 +593,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
 CONFIG_CRYPTO_USER_API_RNG=m
 CONFIG_CRYPTO_USER_API_AEAD=m
 # CONFIG_CRYPTO_HW is not set
+CONFIG_CRC32_SELFTEST=m
 CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/include/asm/bitops.h b/arch/m68k/include/asm/bitops.h
index b4a9b0d5928d..dda58cfe8c22 100644
--- a/arch/m68k/include/asm/bitops.h
+++ b/arch/m68k/include/asm/bitops.h
@@ -148,7 +148,7 @@ static inline void bfchg_mem_change_bit(int nr, volatile unsigned long *vaddr)
 #define __change_bit(nr, vaddr)	change_bit(nr, vaddr)
 
 
-static inline int test_bit(int nr, const unsigned long *vaddr)
+static inline int test_bit(int nr, const volatile unsigned long *vaddr)
 {
 	return (vaddr[nr >> 5] & (1UL << (nr & 31))) != 0;
 }
diff --git a/arch/m68k/include/asm/unistd.h b/arch/m68k/include/asm/unistd.h
index a857d82ec509..aab1edd0d4ba 100644
--- a/arch/m68k/include/asm/unistd.h
+++ b/arch/m68k/include/asm/unistd.h
@@ -4,7 +4,7 @@
 #include <uapi/asm/unistd.h>
 
 
-#define NR_syscalls		379
+#define NR_syscalls		380
 
 #define __ARCH_WANT_OLD_READDIR
 #define __ARCH_WANT_OLD_STAT
diff --git a/arch/m68k/include/uapi/asm/unistd.h b/arch/m68k/include/uapi/asm/unistd.h
index 9fe674bf911f..25589f5b8669 100644
--- a/arch/m68k/include/uapi/asm/unistd.h
+++ b/arch/m68k/include/uapi/asm/unistd.h
@@ -384,5 +384,6 @@
 #define __NR_copy_file_range	376
 #define __NR_preadv2		377
 #define __NR_pwritev2		378
+#define __NR_statx		379
 
 #endif /* _UAPI_ASM_M68K_UNISTD_H_ */
diff --git a/arch/m68k/kernel/syscalltable.S b/arch/m68k/kernel/syscalltable.S
index d6fd6d9ced24..8c9fcfafe0dd 100644
--- a/arch/m68k/kernel/syscalltable.S
+++ b/arch/m68k/kernel/syscalltable.S
@@ -399,3 +399,4 @@ ENTRY(sys_call_table)
 	.long sys_copy_file_range
 	.long sys_preadv2
 	.long sys_pwritev2
+	.long sys_statx
diff --git a/arch/metag/include/asm/uaccess.h b/arch/metag/include/asm/uaccess.h
index 273e61225c27..07238b39638c 100644
--- a/arch/metag/include/asm/uaccess.h
+++ b/arch/metag/include/asm/uaccess.h
@@ -197,20 +197,21 @@ extern long __must_check strnlen_user(const char __user *src, long count);
 
 #define strlen_user(str) strnlen_user(str, 32767)
 
-extern unsigned long __must_check __copy_user_zeroing(void *to,
-					const void __user *from,
-					unsigned long n);
+extern unsigned long raw_copy_from_user(void *to, const void __user *from,
+					unsigned long n);
 
 static inline unsigned long
 copy_from_user(void *to, const void __user *from, unsigned long n)
 {
+	unsigned long res = n;
 	if (likely(access_ok(VERIFY_READ, from, n)))
-		return __copy_user_zeroing(to, from, n);
-	memset(to, 0, n);
-	return n;
+		res = raw_copy_from_user(to, from, n);
+	if (unlikely(res))
+		memset(to + (n - res), 0, res);
+	return res;
 }
 
-#define __copy_from_user(to, from, n) __copy_user_zeroing(to, from, n)
+#define __copy_from_user(to, from, n) raw_copy_from_user(to, from, n)
 #define __copy_from_user_inatomic __copy_from_user
 
 extern unsigned long __must_check __copy_user(void __user *to,
diff --git a/arch/metag/kernel/ptrace.c b/arch/metag/kernel/ptrace.c
index 5fd16ee5280c..e615603a4b0a 100644
--- a/arch/metag/kernel/ptrace.c
+++ b/arch/metag/kernel/ptrace.c
@@ -26,6 +26,16 @@
  * user_regset definitions.
  */
 
+static unsigned long user_txstatus(const struct pt_regs *regs)
+{
+	unsigned long data = (unsigned long)regs->ctx.Flags;
+
+	if (regs->ctx.SaveMask & TBICTX_CBUF_BIT)
+		data |= USER_GP_REGS_STATUS_CATCH_BIT;
+
+	return data;
+}
+
 int metag_gp_regs_copyout(const struct pt_regs *regs,
 			  unsigned int pos, unsigned int count,
 			  void *kbuf, void __user *ubuf)
@@ -64,9 +74,7 @@ int metag_gp_regs_copyout(const struct pt_regs *regs,
 	if (ret)
 		goto out;
 	/* TXSTATUS */
-	data = (unsigned long)regs->ctx.Flags;
-	if (regs->ctx.SaveMask & TBICTX_CBUF_BIT)
-		data |= USER_GP_REGS_STATUS_CATCH_BIT;
+	data = user_txstatus(regs);
 	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
 				  &data, 4*25, 4*26);
 	if (ret)
@@ -121,6 +129,7 @@ int metag_gp_regs_copyin(struct pt_regs *regs,
 	if (ret)
 		goto out;
 	/* TXSTATUS */
+	data = user_txstatus(regs);
 	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
 				 &data, 4*25, 4*26);
 	if (ret)
@@ -246,6 +255,8 @@ int metag_rp_state_copyin(struct pt_regs *regs,
 	unsigned long long *ptr;
 	int ret, i;
 
+	if (count < 4*13)
+		return -EINVAL;
 	/* Read the entire pipeline before making any changes */
 	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
 				 &rp, 0, 4*13);
@@ -305,7 +316,7 @@ static int metag_tls_set(struct task_struct *target,
 		    const void *kbuf, const void __user *ubuf)
 {
 	int ret;
-	void __user *tls;
+	void __user *tls = target->thread.tls_ptr;
 
 	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
 	if (ret)
diff --git a/arch/metag/lib/usercopy.c b/arch/metag/lib/usercopy.c
index b3ebfe9c8e88..2792fc621088 100644
--- a/arch/metag/lib/usercopy.c
+++ b/arch/metag/lib/usercopy.c
@@ -29,7 +29,6 @@
 		COPY						\
 		"1:\n"						\
 		"	.section .fixup,\"ax\"\n"		\
-		"	MOV D1Ar1,#0\n"				\
 		FIXUP						\
 		"	MOVT D1Ar1,#HI(1b)\n"			\
 		"	JUMP D1Ar1,#LO(1b)\n"			\
@@ -260,27 +259,31 @@
 		"MGETL	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"	\
 		"22:\n"						\
 		"MSETL	[%0++], D0FrT, D0.5, D0.6, D0.7\n"	\
-		"SUB	%3, %3, #32\n"				\
 		"23:\n"						\
-		"MGETL	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"	\
+		"SUB	%3, %3, #32\n"				\
 		"24:\n"						\
+		"MGETL	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"	\
+		"25:\n"						\
 		"MSETL	[%0++], D0FrT, D0.5, D0.6, D0.7\n"	\
+		"26:\n"						\
 		"SUB	%3, %3, #32\n"				\
 		"DCACHE	[%1+#-64], D0Ar6\n"			\
 		"BR	$Lloop"id"\n"				\
 								\
 		"MOV	RAPF, %1\n"				\
-		"25:\n"						\
+		"27:\n"						\
 		"MGETL	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"	\
-		"26:\n"						\
+		"28:\n"						\
 		"MSETL	[%0++], D0FrT, D0.5, D0.6, D0.7\n"	\
+		"29:\n"						\
 		"SUB	%3, %3, #32\n"				\
-		"27:\n"						\
+		"30:\n"						\
 		"MGETL	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"	\
-		"28:\n"						\
+		"31:\n"						\
 		"MSETL	[%0++], D0FrT, D0.5, D0.6, D0.7\n"	\
+		"32:\n"						\
 		"SUB	%0, %0, #8\n"				\
-		"29:\n"						\
+		"33:\n"						\
 		"SETL	[%0++], D0.7, D1.7\n"			\
 		"SUB	%3, %3, #32\n"				\
 		"1:"						\
@@ -312,11 +315,15 @@
312 " .long 26b,3b\n" \ 315 " .long 26b,3b\n" \
313 " .long 27b,3b\n" \ 316 " .long 27b,3b\n" \
314 " .long 28b,3b\n" \ 317 " .long 28b,3b\n" \
315 " .long 29b,4b\n" \ 318 " .long 29b,3b\n" \
319 " .long 30b,3b\n" \
320 " .long 31b,3b\n" \
321 " .long 32b,3b\n" \
322 " .long 33b,4b\n" \
316 " .previous\n" \ 323 " .previous\n" \
317 : "=r" (to), "=r" (from), "=r" (ret), "=d" (n) \ 324 : "=r" (to), "=r" (from), "=r" (ret), "=d" (n) \
318 : "0" (to), "1" (from), "2" (ret), "3" (n) \ 325 : "0" (to), "1" (from), "2" (ret), "3" (n) \
319 : "D1Ar1", "D0Ar2", "memory") 326 : "D1Ar1", "D0Ar2", "cc", "memory")
320 327
321/* rewind 'to' and 'from' pointers when a fault occurs 328/* rewind 'to' and 'from' pointers when a fault occurs
322 * 329 *
@@ -342,7 +349,7 @@
342#define __asm_copy_to_user_64bit_rapf_loop(to, from, ret, n, id)\ 349#define __asm_copy_to_user_64bit_rapf_loop(to, from, ret, n, id)\
343 __asm_copy_user_64bit_rapf_loop(to, from, ret, n, id, \ 350 __asm_copy_user_64bit_rapf_loop(to, from, ret, n, id, \
344 "LSR D0Ar2, D0Ar2, #8\n" \ 351 "LSR D0Ar2, D0Ar2, #8\n" \
345 "AND D0Ar2, D0Ar2, #0x7\n" \ 352 "ANDS D0Ar2, D0Ar2, #0x7\n" \
346 "ADDZ D0Ar2, D0Ar2, #4\n" \ 353 "ADDZ D0Ar2, D0Ar2, #4\n" \
347 "SUB D0Ar2, D0Ar2, #1\n" \ 354 "SUB D0Ar2, D0Ar2, #1\n" \
348 "MOV D1Ar1, #4\n" \ 355 "MOV D1Ar1, #4\n" \
@@ -403,47 +410,55 @@
403 "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ 410 "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
404 "22:\n" \ 411 "22:\n" \
405 "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ 412 "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
406 "SUB %3, %3, #16\n" \
407 "23:\n" \ 413 "23:\n" \
408 "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
409 "24:\n" \
410 "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
411 "SUB %3, %3, #16\n" \ 414 "SUB %3, %3, #16\n" \
412 "25:\n" \ 415 "24:\n" \
413 "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ 416 "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
414 "26:\n" \ 417 "25:\n" \
415 "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ 418 "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
419 "26:\n" \
416 "SUB %3, %3, #16\n" \ 420 "SUB %3, %3, #16\n" \
417 "27:\n" \ 421 "27:\n" \
418 "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ 422 "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
419 "28:\n" \ 423 "28:\n" \
420 "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ 424 "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
425 "29:\n" \
426 "SUB %3, %3, #16\n" \
427 "30:\n" \
428 "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
429 "31:\n" \
430 "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
431 "32:\n" \
421 "SUB %3, %3, #16\n" \ 432 "SUB %3, %3, #16\n" \
422 "DCACHE [%1+#-64], D0Ar6\n" \ 433 "DCACHE [%1+#-64], D0Ar6\n" \
423 "BR $Lloop"id"\n" \ 434 "BR $Lloop"id"\n" \
424 \ 435 \
425 "MOV RAPF, %1\n" \ 436 "MOV RAPF, %1\n" \
426 "29:\n" \ 437 "33:\n" \
427 "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ 438 "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
428 "30:\n" \ 439 "34:\n" \
429 "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ 440 "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
441 "35:\n" \
430 "SUB %3, %3, #16\n" \ 442 "SUB %3, %3, #16\n" \
431 "31:\n" \ 443 "36:\n" \
432 "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ 444 "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
433 "32:\n" \ 445 "37:\n" \
434 "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ 446 "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
447 "38:\n" \
435 "SUB %3, %3, #16\n" \ 448 "SUB %3, %3, #16\n" \
436 "33:\n" \ 449 "39:\n" \
437 "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ 450 "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
438 "34:\n" \ 451 "40:\n" \
439 "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ 452 "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
453 "41:\n" \
440 "SUB %3, %3, #16\n" \ 454 "SUB %3, %3, #16\n" \
441 "35:\n" \ 455 "42:\n" \
442 "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ 456 "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
443 "36:\n" \ 457 "43:\n" \
444 "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ 458 "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
459 "44:\n" \
445 "SUB %0, %0, #4\n" \ 460 "SUB %0, %0, #4\n" \
446 "37:\n" \ 461 "45:\n" \
447 "SETD [%0++], D0.7\n" \ 462 "SETD [%0++], D0.7\n" \
448 "SUB %3, %3, #16\n" \ 463 "SUB %3, %3, #16\n" \
449 "1:" \ 464 "1:" \
@@ -483,11 +498,19 @@
483 " .long 34b,3b\n" \ 498 " .long 34b,3b\n" \
484 " .long 35b,3b\n" \ 499 " .long 35b,3b\n" \
485 " .long 36b,3b\n" \ 500 " .long 36b,3b\n" \
486 " .long 37b,4b\n" \ 501 " .long 37b,3b\n" \
502 " .long 38b,3b\n" \
503 " .long 39b,3b\n" \
504 " .long 40b,3b\n" \
505 " .long 41b,3b\n" \
506 " .long 42b,3b\n" \
507 " .long 43b,3b\n" \
508 " .long 44b,3b\n" \
509 " .long 45b,4b\n" \
487 " .previous\n" \ 510 " .previous\n" \
488 : "=r" (to), "=r" (from), "=r" (ret), "=d" (n) \ 511 : "=r" (to), "=r" (from), "=r" (ret), "=d" (n) \
489 : "0" (to), "1" (from), "2" (ret), "3" (n) \ 512 : "0" (to), "1" (from), "2" (ret), "3" (n) \
490 : "D1Ar1", "D0Ar2", "memory") 513 : "D1Ar1", "D0Ar2", "cc", "memory")
491 514
492/* rewind 'to' and 'from' pointers when a fault occurs 515/* rewind 'to' and 'from' pointers when a fault occurs
493 * 516 *
@@ -513,7 +536,7 @@
513#define __asm_copy_to_user_32bit_rapf_loop(to, from, ret, n, id)\ 536#define __asm_copy_to_user_32bit_rapf_loop(to, from, ret, n, id)\
514 __asm_copy_user_32bit_rapf_loop(to, from, ret, n, id, \ 537 __asm_copy_user_32bit_rapf_loop(to, from, ret, n, id, \
515 "LSR D0Ar2, D0Ar2, #8\n" \ 538 "LSR D0Ar2, D0Ar2, #8\n" \
516 "AND D0Ar2, D0Ar2, #0x7\n" \ 539 "ANDS D0Ar2, D0Ar2, #0x7\n" \
517 "ADDZ D0Ar2, D0Ar2, #4\n" \ 540 "ADDZ D0Ar2, D0Ar2, #4\n" \
518 "SUB D0Ar2, D0Ar2, #1\n" \ 541 "SUB D0Ar2, D0Ar2, #1\n" \
519 "MOV D1Ar1, #4\n" \ 542 "MOV D1Ar1, #4\n" \
@@ -538,23 +561,31 @@ unsigned long __copy_user(void __user *pdst, const void *psrc,
538 if ((unsigned long) src & 1) { 561 if ((unsigned long) src & 1) {
539 __asm_copy_to_user_1(dst, src, retn); 562 __asm_copy_to_user_1(dst, src, retn);
540 n--; 563 n--;
564 if (retn)
565 return retn + n;
541 } 566 }
542 if ((unsigned long) dst & 1) { 567 if ((unsigned long) dst & 1) {
543 /* Worst case - byte copy */ 568 /* Worst case - byte copy */
544 while (n > 0) { 569 while (n > 0) {
545 __asm_copy_to_user_1(dst, src, retn); 570 __asm_copy_to_user_1(dst, src, retn);
546 n--; 571 n--;
572 if (retn)
573 return retn + n;
547 } 574 }
548 } 575 }
549 if (((unsigned long) src & 2) && n >= 2) { 576 if (((unsigned long) src & 2) && n >= 2) {
550 __asm_copy_to_user_2(dst, src, retn); 577 __asm_copy_to_user_2(dst, src, retn);
551 n -= 2; 578 n -= 2;
579 if (retn)
580 return retn + n;
552 } 581 }
553 if ((unsigned long) dst & 2) { 582 if ((unsigned long) dst & 2) {
554 /* Second worst case - word copy */ 583 /* Second worst case - word copy */
555 while (n >= 2) { 584 while (n >= 2) {
556 __asm_copy_to_user_2(dst, src, retn); 585 __asm_copy_to_user_2(dst, src, retn);
557 n -= 2; 586 n -= 2;
587 if (retn)
588 return retn + n;
558 } 589 }
559 } 590 }
560 591
@@ -569,6 +600,8 @@ unsigned long __copy_user(void __user *pdst, const void *psrc,
569 while (n >= 8) { 600 while (n >= 8) {
570 __asm_copy_to_user_8x64(dst, src, retn); 601 __asm_copy_to_user_8x64(dst, src, retn);
571 n -= 8; 602 n -= 8;
603 if (retn)
604 return retn + n;
572 } 605 }
573 } 606 }
574 if (n >= RAPF_MIN_BUF_SIZE) { 607 if (n >= RAPF_MIN_BUF_SIZE) {
@@ -581,6 +614,8 @@ unsigned long __copy_user(void __user *pdst, const void *psrc,
581 while (n >= 8) { 614 while (n >= 8) {
582 __asm_copy_to_user_8x64(dst, src, retn); 615 __asm_copy_to_user_8x64(dst, src, retn);
583 n -= 8; 616 n -= 8;
617 if (retn)
618 return retn + n;
584 } 619 }
585 } 620 }
586#endif 621#endif
@@ -588,11 +623,15 @@ unsigned long __copy_user(void __user *pdst, const void *psrc,
588 while (n >= 16) { 623 while (n >= 16) {
589 __asm_copy_to_user_16(dst, src, retn); 624 __asm_copy_to_user_16(dst, src, retn);
590 n -= 16; 625 n -= 16;
626 if (retn)
627 return retn + n;
591 } 628 }
592 629
593 while (n >= 4) { 630 while (n >= 4) {
594 __asm_copy_to_user_4(dst, src, retn); 631 __asm_copy_to_user_4(dst, src, retn);
595 n -= 4; 632 n -= 4;
633 if (retn)
634 return retn + n;
596 } 635 }
597 636
598 switch (n) { 637 switch (n) {
@@ -609,6 +648,10 @@ unsigned long __copy_user(void __user *pdst, const void *psrc,
609 break; 648 break;
610 } 649 }
611 650
651 /*
652 * If we get here, retn correctly reflects the number of failing
653 * bytes.
654 */
612 return retn; 655 return retn;
613} 656}
614EXPORT_SYMBOL(__copy_user); 657EXPORT_SYMBOL(__copy_user);
@@ -617,16 +660,14 @@ EXPORT_SYMBOL(__copy_user);
617 __asm_copy_user_cont(to, from, ret, \ 660 __asm_copy_user_cont(to, from, ret, \
618 " GETB D1Ar1,[%1++]\n" \ 661 " GETB D1Ar1,[%1++]\n" \
619 "2: SETB [%0++],D1Ar1\n", \ 662 "2: SETB [%0++],D1Ar1\n", \
620 "3: ADD %2,%2,#1\n" \ 663 "3: ADD %2,%2,#1\n", \
621 " SETB [%0++],D1Ar1\n", \
622 " .long 2b,3b\n") 664 " .long 2b,3b\n")
623 665
624#define __asm_copy_from_user_2x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ 666#define __asm_copy_from_user_2x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
625 __asm_copy_user_cont(to, from, ret, \ 667 __asm_copy_user_cont(to, from, ret, \
626 " GETW D1Ar1,[%1++]\n" \ 668 " GETW D1Ar1,[%1++]\n" \
627 "2: SETW [%0++],D1Ar1\n" COPY, \ 669 "2: SETW [%0++],D1Ar1\n" COPY, \
628 "3: ADD %2,%2,#2\n" \ 670 "3: ADD %2,%2,#2\n" FIXUP, \
629 " SETW [%0++],D1Ar1\n" FIXUP, \
630 " .long 2b,3b\n" TENTRY) 671 " .long 2b,3b\n" TENTRY)
631 672
632#define __asm_copy_from_user_2(to, from, ret) \ 673#define __asm_copy_from_user_2(to, from, ret) \
@@ -636,145 +677,26 @@ EXPORT_SYMBOL(__copy_user);
636 __asm_copy_from_user_2x_cont(to, from, ret, \ 677 __asm_copy_from_user_2x_cont(to, from, ret, \
637 " GETB D1Ar1,[%1++]\n" \ 678 " GETB D1Ar1,[%1++]\n" \
638 "4: SETB [%0++],D1Ar1\n", \ 679 "4: SETB [%0++],D1Ar1\n", \
639 "5: ADD %2,%2,#1\n" \ 680 "5: ADD %2,%2,#1\n", \
640 " SETB [%0++],D1Ar1\n", \
641 " .long 4b,5b\n") 681 " .long 4b,5b\n")
642 682
643#define __asm_copy_from_user_4x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ 683#define __asm_copy_from_user_4x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
644 __asm_copy_user_cont(to, from, ret, \ 684 __asm_copy_user_cont(to, from, ret, \
645 " GETD D1Ar1,[%1++]\n" \ 685 " GETD D1Ar1,[%1++]\n" \
646 "2: SETD [%0++],D1Ar1\n" COPY, \ 686 "2: SETD [%0++],D1Ar1\n" COPY, \
647 "3: ADD %2,%2,#4\n" \ 687 "3: ADD %2,%2,#4\n" FIXUP, \
648 " SETD [%0++],D1Ar1\n" FIXUP, \
649 " .long 2b,3b\n" TENTRY) 688 " .long 2b,3b\n" TENTRY)
650 689
651#define __asm_copy_from_user_4(to, from, ret) \ 690#define __asm_copy_from_user_4(to, from, ret) \
652 __asm_copy_from_user_4x_cont(to, from, ret, "", "", "") 691 __asm_copy_from_user_4x_cont(to, from, ret, "", "", "")
653 692
654#define __asm_copy_from_user_5(to, from, ret) \
655 __asm_copy_from_user_4x_cont(to, from, ret, \
656 " GETB D1Ar1,[%1++]\n" \
657 "4: SETB [%0++],D1Ar1\n", \
658 "5: ADD %2,%2,#1\n" \
659 " SETB [%0++],D1Ar1\n", \
660 " .long 4b,5b\n")
661
662#define __asm_copy_from_user_6x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
663 __asm_copy_from_user_4x_cont(to, from, ret, \
664 " GETW D1Ar1,[%1++]\n" \
665 "4: SETW [%0++],D1Ar1\n" COPY, \
666 "5: ADD %2,%2,#2\n" \
667 " SETW [%0++],D1Ar1\n" FIXUP, \
668 " .long 4b,5b\n" TENTRY)
669
670#define __asm_copy_from_user_6(to, from, ret) \
671 __asm_copy_from_user_6x_cont(to, from, ret, "", "", "")
672
673#define __asm_copy_from_user_7(to, from, ret) \
674 __asm_copy_from_user_6x_cont(to, from, ret, \
675 " GETB D1Ar1,[%1++]\n" \
676 "6: SETB [%0++],D1Ar1\n", \
677 "7: ADD %2,%2,#1\n" \
678 " SETB [%0++],D1Ar1\n", \
679 " .long 6b,7b\n")
680
681#define __asm_copy_from_user_8x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
682 __asm_copy_from_user_4x_cont(to, from, ret, \
683 " GETD D1Ar1,[%1++]\n" \
684 "4: SETD [%0++],D1Ar1\n" COPY, \
685 "5: ADD %2,%2,#4\n" \
686 " SETD [%0++],D1Ar1\n" FIXUP, \
687 " .long 4b,5b\n" TENTRY)
688
689#define __asm_copy_from_user_8(to, from, ret) \
690 __asm_copy_from_user_8x_cont(to, from, ret, "", "", "")
691
692#define __asm_copy_from_user_9(to, from, ret) \
693 __asm_copy_from_user_8x_cont(to, from, ret, \
694 " GETB D1Ar1,[%1++]\n" \
695 "6: SETB [%0++],D1Ar1\n", \
696 "7: ADD %2,%2,#1\n" \
697 " SETB [%0++],D1Ar1\n", \
698 " .long 6b,7b\n")
699
700#define __asm_copy_from_user_10x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
701 __asm_copy_from_user_8x_cont(to, from, ret, \
702 " GETW D1Ar1,[%1++]\n" \
703 "6: SETW [%0++],D1Ar1\n" COPY, \
704 "7: ADD %2,%2,#2\n" \
705 " SETW [%0++],D1Ar1\n" FIXUP, \
706 " .long 6b,7b\n" TENTRY)
707
708#define __asm_copy_from_user_10(to, from, ret) \
709 __asm_copy_from_user_10x_cont(to, from, ret, "", "", "")
710
711#define __asm_copy_from_user_11(to, from, ret) \
712 __asm_copy_from_user_10x_cont(to, from, ret, \
713 " GETB D1Ar1,[%1++]\n" \
714 "8: SETB [%0++],D1Ar1\n", \
715 "9: ADD %2,%2,#1\n" \
716 " SETB [%0++],D1Ar1\n", \
717 " .long 8b,9b\n")
718
719#define __asm_copy_from_user_12x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
720 __asm_copy_from_user_8x_cont(to, from, ret, \
721 " GETD D1Ar1,[%1++]\n" \
722 "6: SETD [%0++],D1Ar1\n" COPY, \
723 "7: ADD %2,%2,#4\n" \
724 " SETD [%0++],D1Ar1\n" FIXUP, \
725 " .long 6b,7b\n" TENTRY)
726
727#define __asm_copy_from_user_12(to, from, ret) \
728 __asm_copy_from_user_12x_cont(to, from, ret, "", "", "")
729
730#define __asm_copy_from_user_13(to, from, ret) \
731 __asm_copy_from_user_12x_cont(to, from, ret, \
732 " GETB D1Ar1,[%1++]\n" \
733 "8: SETB [%0++],D1Ar1\n", \
734 "9: ADD %2,%2,#1\n" \
735 " SETB [%0++],D1Ar1\n", \
736 " .long 8b,9b\n")
737
738#define __asm_copy_from_user_14x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
739 __asm_copy_from_user_12x_cont(to, from, ret, \
740 " GETW D1Ar1,[%1++]\n" \
741 "8: SETW [%0++],D1Ar1\n" COPY, \
742 "9: ADD %2,%2,#2\n" \
743 " SETW [%0++],D1Ar1\n" FIXUP, \
744 " .long 8b,9b\n" TENTRY)
745
746#define __asm_copy_from_user_14(to, from, ret) \
747 __asm_copy_from_user_14x_cont(to, from, ret, "", "", "")
748
749#define __asm_copy_from_user_15(to, from, ret) \
750 __asm_copy_from_user_14x_cont(to, from, ret, \
751 " GETB D1Ar1,[%1++]\n" \
752 "10: SETB [%0++],D1Ar1\n", \
753 "11: ADD %2,%2,#1\n" \
754 " SETB [%0++],D1Ar1\n", \
755 " .long 10b,11b\n")
756
757#define __asm_copy_from_user_16x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
758 __asm_copy_from_user_12x_cont(to, from, ret, \
759 " GETD D1Ar1,[%1++]\n" \
760 "8: SETD [%0++],D1Ar1\n" COPY, \
761 "9: ADD %2,%2,#4\n" \
762 " SETD [%0++],D1Ar1\n" FIXUP, \
763 " .long 8b,9b\n" TENTRY)
764
765#define __asm_copy_from_user_16(to, from, ret) \
766 __asm_copy_from_user_16x_cont(to, from, ret, "", "", "")
767
768#define __asm_copy_from_user_8x64(to, from, ret) \ 693#define __asm_copy_from_user_8x64(to, from, ret) \
769 asm volatile ( \ 694 asm volatile ( \
770 " GETL D0Ar2,D1Ar1,[%1++]\n" \ 695 " GETL D0Ar2,D1Ar1,[%1++]\n" \
771 "2: SETL [%0++],D0Ar2,D1Ar1\n" \ 696 "2: SETL [%0++],D0Ar2,D1Ar1\n" \
772 "1:\n" \ 697 "1:\n" \
773 " .section .fixup,\"ax\"\n" \ 698 " .section .fixup,\"ax\"\n" \
774 " MOV D1Ar1,#0\n" \
775 " MOV D0Ar2,#0\n" \
776 "3: ADD %2,%2,#8\n" \ 699 "3: ADD %2,%2,#8\n" \
777 " SETL [%0++],D0Ar2,D1Ar1\n" \
778 " MOVT D0Ar2,#HI(1b)\n" \ 700 " MOVT D0Ar2,#HI(1b)\n" \
779 " JUMP D0Ar2,#LO(1b)\n" \ 701 " JUMP D0Ar2,#LO(1b)\n" \
780 " .previous\n" \ 702 " .previous\n" \
@@ -789,36 +711,57 @@ EXPORT_SYMBOL(__copy_user);
789 * 711 *
790 * Rationale: 712 * Rationale:
791 * A fault occurs while reading from user buffer, which is the 713 * A fault occurs while reading from user buffer, which is the
792 * source. Since the fault is at a single address, we only 714 * source.
793 * need to rewind by 8 bytes.
 794 * Since we don't write to the kernel buffer until we read first, 715 * Since we don't write to the kernel buffer until we read first,
 795 * the kernel buffer is in the right state and needn't be 716 * the kernel buffer is in the right state and needn't be
 796 * corrected. 717 * corrected, but the source must be rewound to the beginning of
718 * the block, which is LSM_STEP*8 bytes.
719 * LSM_STEP is bits 10:8 in TXSTATUS which is already read
720 * and stored in D0Ar2
721 *
722 * NOTE: If a fault occurs at the last operation in M{G,S}ETL
 723 * LSM_STEP will be 0; i.e. we do 4 writes in our case, so if
724 * a fault happens at the 4th write, LSM_STEP will be 0
725 * instead of 4. The code copes with that.
797 */ 726 */
798#define __asm_copy_from_user_64bit_rapf_loop(to, from, ret, n, id) \ 727#define __asm_copy_from_user_64bit_rapf_loop(to, from, ret, n, id) \
799 __asm_copy_user_64bit_rapf_loop(to, from, ret, n, id, \ 728 __asm_copy_user_64bit_rapf_loop(to, from, ret, n, id, \
800 "SUB %1, %1, #8\n") 729 "LSR D0Ar2, D0Ar2, #5\n" \
730 "ANDS D0Ar2, D0Ar2, #0x38\n" \
731 "ADDZ D0Ar2, D0Ar2, #32\n" \
732 "SUB %1, %1, D0Ar2\n")
801 733
802/* rewind 'from' pointer when a fault occurs 734/* rewind 'from' pointer when a fault occurs
803 * 735 *
804 * Rationale: 736 * Rationale:
805 * A fault occurs while reading from user buffer, which is the 737 * A fault occurs while reading from user buffer, which is the
806 * source. Since the fault is at a single address, we only 738 * source.
807 * need to rewind by 4 bytes.
 808 * Since we don't write to the kernel buffer until we read first, 739 * Since we don't write to the kernel buffer until we read first,
 809 * the kernel buffer is in the right state and needn't be 740 * the kernel buffer is in the right state and needn't be
 810 * corrected. 741 * corrected, but the source must be rewound to the beginning of
742 * the block, which is LSM_STEP*4 bytes.
743 * LSM_STEP is bits 10:8 in TXSTATUS which is already read
744 * and stored in D0Ar2
745 *
 746 * NOTE: If a fault occurs at the last operation in M{G,S}ETD
 747 * LSM_STEP will be 0; i.e. we do 4 writes in our case, so if
748 * a fault happens at the 4th write, LSM_STEP will be 0
749 * instead of 4. The code copes with that.
811 */ 750 */
812#define __asm_copy_from_user_32bit_rapf_loop(to, from, ret, n, id) \ 751#define __asm_copy_from_user_32bit_rapf_loop(to, from, ret, n, id) \
813 __asm_copy_user_32bit_rapf_loop(to, from, ret, n, id, \ 752 __asm_copy_user_32bit_rapf_loop(to, from, ret, n, id, \
814 "SUB %1, %1, #4\n") 753 "LSR D0Ar2, D0Ar2, #6\n" \
754 "ANDS D0Ar2, D0Ar2, #0x1c\n" \
755 "ADDZ D0Ar2, D0Ar2, #16\n" \
756 "SUB %1, %1, D0Ar2\n")
815 757
816 758
817/* Copy from user to kernel, zeroing the bytes that were inaccessible in 759/*
818 userland. The return-value is the number of bytes that were 760 * Copy from user to kernel. The return-value is the number of bytes that were
819 inaccessible. */ 761 * inaccessible.
820unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc, 762 */
821 unsigned long n) 763unsigned long raw_copy_from_user(void *pdst, const void __user *psrc,
764 unsigned long n)
822{ 765{
823 register char *dst asm ("A0.2") = pdst; 766 register char *dst asm ("A0.2") = pdst;
824 register const char __user *src asm ("A1.2") = psrc; 767 register const char __user *src asm ("A1.2") = psrc;
@@ -830,6 +773,8 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
830 if ((unsigned long) src & 1) { 773 if ((unsigned long) src & 1) {
831 __asm_copy_from_user_1(dst, src, retn); 774 __asm_copy_from_user_1(dst, src, retn);
832 n--; 775 n--;
776 if (retn)
777 return retn + n;
833 } 778 }
834 if ((unsigned long) dst & 1) { 779 if ((unsigned long) dst & 1) {
835 /* Worst case - byte copy */ 780 /* Worst case - byte copy */
@@ -837,12 +782,14 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
837 __asm_copy_from_user_1(dst, src, retn); 782 __asm_copy_from_user_1(dst, src, retn);
838 n--; 783 n--;
839 if (retn) 784 if (retn)
840 goto copy_exception_bytes; 785 return retn + n;
841 } 786 }
842 } 787 }
843 if (((unsigned long) src & 2) && n >= 2) { 788 if (((unsigned long) src & 2) && n >= 2) {
844 __asm_copy_from_user_2(dst, src, retn); 789 __asm_copy_from_user_2(dst, src, retn);
845 n -= 2; 790 n -= 2;
791 if (retn)
792 return retn + n;
846 } 793 }
847 if ((unsigned long) dst & 2) { 794 if ((unsigned long) dst & 2) {
848 /* Second worst case - word copy */ 795 /* Second worst case - word copy */
@@ -850,16 +797,10 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
850 __asm_copy_from_user_2(dst, src, retn); 797 __asm_copy_from_user_2(dst, src, retn);
851 n -= 2; 798 n -= 2;
852 if (retn) 799 if (retn)
853 goto copy_exception_bytes; 800 return retn + n;
854 } 801 }
855 } 802 }
856 803
857 /* We only need one check after the unalignment-adjustments,
858 because if both adjustments were done, either both or
859 neither reference had an exception. */
860 if (retn != 0)
861 goto copy_exception_bytes;
862
863#ifdef USE_RAPF 804#ifdef USE_RAPF
864 /* 64 bit copy loop */ 805 /* 64 bit copy loop */
865 if (!(((unsigned long) src | (unsigned long) dst) & 7)) { 806 if (!(((unsigned long) src | (unsigned long) dst) & 7)) {
@@ -872,7 +813,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
872 __asm_copy_from_user_8x64(dst, src, retn); 813 __asm_copy_from_user_8x64(dst, src, retn);
873 n -= 8; 814 n -= 8;
874 if (retn) 815 if (retn)
875 goto copy_exception_bytes; 816 return retn + n;
876 } 817 }
877 } 818 }
878 819
@@ -888,7 +829,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
888 __asm_copy_from_user_8x64(dst, src, retn); 829 __asm_copy_from_user_8x64(dst, src, retn);
889 n -= 8; 830 n -= 8;
890 if (retn) 831 if (retn)
891 goto copy_exception_bytes; 832 return retn + n;
892 } 833 }
893 } 834 }
894#endif 835#endif
@@ -898,7 +839,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
898 n -= 4; 839 n -= 4;
899 840
900 if (retn) 841 if (retn)
901 goto copy_exception_bytes; 842 return retn + n;
902 } 843 }
903 844
904 /* If we get here, there were no memory read faults. */ 845 /* If we get here, there were no memory read faults. */
@@ -924,21 +865,8 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
924 /* If we get here, retn correctly reflects the number of failing 865 /* If we get here, retn correctly reflects the number of failing
925 bytes. */ 866 bytes. */
926 return retn; 867 return retn;
927
928 copy_exception_bytes:
929 /* We already have "retn" bytes cleared, and need to clear the
930 remaining "n" bytes. A non-optimized simple byte-for-byte in-line
931 memset is preferred here, since this isn't speed-critical code and
932 we'd rather have this a leaf-function than calling memset. */
933 {
934 char *endp;
935 for (endp = dst + n; dst < endp; dst++)
936 *dst = 0;
937 }
938
939 return retn + n;
940} 868}
941EXPORT_SYMBOL(__copy_user_zeroing); 869EXPORT_SYMBOL(raw_copy_from_user);
942 870
943#define __asm_clear_8x64(to, ret) \ 871#define __asm_clear_8x64(to, ret) \
944 asm volatile ( \ 872 asm volatile ( \
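
The rapf fixup rewrites above all reduce to one rule: rewind the source by LSM_STEP times the transfer size, treating a step of 0 as the full burst of 4 transfers. A runnable model of both variants, assuming only the bit layout stated in the comments (LSM_STEP in TXSTATUS bits 10:8):

#include <stdio.h>

/* LSM_STEP sits in TXSTATUS bits 10:8 (per the comments above). */
static unsigned int lsm_step(unsigned long txstatus)
{
        return (txstatus >> 8) & 0x7;
}

/* 64-bit loop: LSM_STEP * 8 bytes, with 0 meaning the fault hit the
 * last of the 4 transfers, i.e. a full 32-byte burst. This mirrors
 * LSR #5 / ANDS #0x38 / ADDZ #32 in the asm. */
static unsigned long rewind64(unsigned long txstatus)
{
        unsigned int step = lsm_step(txstatus);

        return (step ? step : 4) * 8;
}

/* 32-bit loop: LSM_STEP * 4 bytes, 0 -> 16
 * (LSR #6 / ANDS #0x1c / ADDZ #16). */
static unsigned long rewind32(unsigned long txstatus)
{
        unsigned int step = lsm_step(txstatus);

        return (step ? step : 4) * 4;
}

int main(void)
{
        printf("64-bit, step 2 -> %lu bytes\n", rewind64(2ul << 8)); /* 16 */
        printf("64-bit, step 0 -> %lu bytes\n", rewind64(0));        /* 32 */
        printf("32-bit, step 3 -> %lu bytes\n", rewind32(3ul << 8)); /* 12 */
        return 0;
}
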
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index a008a9f03072..e0bb576410bb 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -1531,7 +1531,7 @@ config CPU_MIPS64_R6
1531 select CPU_SUPPORTS_HIGHMEM 1531 select CPU_SUPPORTS_HIGHMEM
1532 select CPU_SUPPORTS_MSA 1532 select CPU_SUPPORTS_MSA
1533 select GENERIC_CSUM 1533 select GENERIC_CSUM
1534 select MIPS_O32_FP64_SUPPORT if MIPS32_O32 1534 select MIPS_O32_FP64_SUPPORT if 32BIT || MIPS32_O32
1535 select HAVE_KVM 1535 select HAVE_KVM
1536 help 1536 help
1537 Choose this option to build a kernel for release 6 or later of the 1537 Choose this option to build a kernel for release 6 or later of the
diff --git a/arch/mips/include/asm/fpu.h b/arch/mips/include/asm/fpu.h
index f94455f964ec..a2813fe381cf 100644
--- a/arch/mips/include/asm/fpu.h
+++ b/arch/mips/include/asm/fpu.h
@@ -21,6 +21,7 @@
21#include <asm/cpu-features.h> 21#include <asm/cpu-features.h>
22#include <asm/fpu_emulator.h> 22#include <asm/fpu_emulator.h>
23#include <asm/hazards.h> 23#include <asm/hazards.h>
24#include <asm/ptrace.h>
24#include <asm/processor.h> 25#include <asm/processor.h>
25#include <asm/current.h> 26#include <asm/current.h>
26#include <asm/msa.h> 27#include <asm/msa.h>
diff --git a/arch/mips/include/asm/irq.h b/arch/mips/include/asm/irq.h
index 956db6e201d1..ddd1c918103b 100644
--- a/arch/mips/include/asm/irq.h
+++ b/arch/mips/include/asm/irq.h
@@ -18,9 +18,24 @@
18#include <irq.h> 18#include <irq.h>
19 19
20#define IRQ_STACK_SIZE THREAD_SIZE 20#define IRQ_STACK_SIZE THREAD_SIZE
21#define IRQ_STACK_START (IRQ_STACK_SIZE - sizeof(unsigned long))
21 22
22extern void *irq_stack[NR_CPUS]; 23extern void *irq_stack[NR_CPUS];
23 24
25/*
26 * The highest address on the IRQ stack contains a dummy frame put down in
27 * genex.S (handle_int & except_vec_vi_handler) which is structured as follows:
28 *
29 * top ------------
30 * | task sp | <- irq_stack[cpu] + IRQ_STACK_START
31 * ------------
32 * | | <- First frame of IRQ context
33 * ------------
34 *
35 * task sp holds a copy of the task stack pointer where the struct pt_regs
36 * from exception entry can be found.
37 */
38
24static inline bool on_irq_stack(int cpu, unsigned long sp) 39static inline bool on_irq_stack(int cpu, unsigned long sp)
25{ 40{
26 unsigned long low = (unsigned long)irq_stack[cpu]; 41 unsigned long low = (unsigned long)irq_stack[cpu];
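
The dummy frame described in the new irq.h comment can be modelled in user space: IRQ_STACK_START addresses the top slot, and the task sp stored there is what the unwinder later dereferences. The sizes below are assumptions for illustration, not the kernel's definitions.

#include <stdio.h>
#include <stdlib.h>

#define IRQ_STACK_SIZE  (16 * 1024)   /* hypothetical; the kernel uses THREAD_SIZE */
#define IRQ_STACK_START (IRQ_STACK_SIZE - sizeof(unsigned long))

int main(void)
{
        unsigned char *irq_stack = malloc(IRQ_STACK_SIZE);
        unsigned long task_sp = 0x12345678;   /* stand-in for the interrupted sp */

        if (!irq_stack)
                return 1;

        /* What genex.S does on IRQ entry: store the task sp in the top slot. */
        *(unsigned long *)(irq_stack + IRQ_STACK_START) = task_sp;

        /* What the unwinder does when it reaches the top of the IRQ stack. */
        printf("resume unwind at sp = %#lx\n",
               *(unsigned long *)(irq_stack + IRQ_STACK_START));
        free(irq_stack);
        return 0;
}
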
diff --git a/arch/mips/include/asm/spinlock.h b/arch/mips/include/asm/spinlock.h
index f485afe51514..a8df44d60607 100644
--- a/arch/mips/include/asm/spinlock.h
+++ b/arch/mips/include/asm/spinlock.h
@@ -127,7 +127,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
127 " andi %[ticket], %[ticket], 0xffff \n" 127 " andi %[ticket], %[ticket], 0xffff \n"
128 " bne %[ticket], %[my_ticket], 4f \n" 128 " bne %[ticket], %[my_ticket], 4f \n"
129 " subu %[ticket], %[my_ticket], %[ticket] \n" 129 " subu %[ticket], %[my_ticket], %[ticket] \n"
130 "2: \n" 130 "2: .insn \n"
131 " .subsection 2 \n" 131 " .subsection 2 \n"
132 "4: andi %[ticket], %[ticket], 0xffff \n" 132 "4: andi %[ticket], %[ticket], 0xffff \n"
133 " sll %[ticket], 5 \n" 133 " sll %[ticket], 5 \n"
@@ -202,7 +202,7 @@ static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
202 " sc %[ticket], %[ticket_ptr] \n" 202 " sc %[ticket], %[ticket_ptr] \n"
203 " beqz %[ticket], 1b \n" 203 " beqz %[ticket], 1b \n"
204 " li %[ticket], 1 \n" 204 " li %[ticket], 1 \n"
205 "2: \n" 205 "2: .insn \n"
206 " .subsection 2 \n" 206 " .subsection 2 \n"
207 "3: b 2b \n" 207 "3: b 2b \n"
208 " li %[ticket], 0 \n" 208 " li %[ticket], 0 \n"
@@ -382,7 +382,7 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
382 " .set reorder \n" 382 " .set reorder \n"
383 __WEAK_LLSC_MB 383 __WEAK_LLSC_MB
384 " li %2, 1 \n" 384 " li %2, 1 \n"
385 "2: \n" 385 "2: .insn \n"
386 : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret) 386 : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret)
387 : GCC_OFF_SMALL_ASM() (rw->lock) 387 : GCC_OFF_SMALL_ASM() (rw->lock)
388 : "memory"); 388 : "memory");
@@ -422,7 +422,7 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
422 " lui %1, 0x8000 \n" 422 " lui %1, 0x8000 \n"
423 " sc %1, %0 \n" 423 " sc %1, %0 \n"
424 " li %2, 1 \n" 424 " li %2, 1 \n"
425 "2: \n" 425 "2: .insn \n"
426 : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), 426 : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp),
427 "=&r" (ret) 427 "=&r" (ret)
428 : GCC_OFF_SMALL_ASM() (rw->lock) 428 : GCC_OFF_SMALL_ASM() (rw->lock)
diff --git a/arch/mips/include/uapi/asm/unistd.h b/arch/mips/include/uapi/asm/unistd.h
index 3e940dbe0262..78faf4292e90 100644
--- a/arch/mips/include/uapi/asm/unistd.h
+++ b/arch/mips/include/uapi/asm/unistd.h
@@ -386,17 +386,18 @@
386#define __NR_pkey_mprotect (__NR_Linux + 363) 386#define __NR_pkey_mprotect (__NR_Linux + 363)
387#define __NR_pkey_alloc (__NR_Linux + 364) 387#define __NR_pkey_alloc (__NR_Linux + 364)
388#define __NR_pkey_free (__NR_Linux + 365) 388#define __NR_pkey_free (__NR_Linux + 365)
389#define __NR_statx (__NR_Linux + 366)
389 390
390 391
391/* 392/*
392 * Offset of the last Linux o32 flavoured syscall 393 * Offset of the last Linux o32 flavoured syscall
393 */ 394 */
394#define __NR_Linux_syscalls 365 395#define __NR_Linux_syscalls 366
395 396
396#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */ 397#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
397 398
398#define __NR_O32_Linux 4000 399#define __NR_O32_Linux 4000
399#define __NR_O32_Linux_syscalls 365 400#define __NR_O32_Linux_syscalls 366
400 401
401#if _MIPS_SIM == _MIPS_SIM_ABI64 402#if _MIPS_SIM == _MIPS_SIM_ABI64
402 403
@@ -730,16 +731,17 @@
730#define __NR_pkey_mprotect (__NR_Linux + 323) 731#define __NR_pkey_mprotect (__NR_Linux + 323)
731#define __NR_pkey_alloc (__NR_Linux + 324) 732#define __NR_pkey_alloc (__NR_Linux + 324)
732#define __NR_pkey_free (__NR_Linux + 325) 733#define __NR_pkey_free (__NR_Linux + 325)
734#define __NR_statx (__NR_Linux + 326)
733 735
734/* 736/*
735 * Offset of the last Linux 64-bit flavoured syscall 737 * Offset of the last Linux 64-bit flavoured syscall
736 */ 738 */
737#define __NR_Linux_syscalls 325 739#define __NR_Linux_syscalls 326
738 740
739#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */ 741#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */
740 742
741#define __NR_64_Linux 5000 743#define __NR_64_Linux 5000
742#define __NR_64_Linux_syscalls 325 744#define __NR_64_Linux_syscalls 326
743 745
744#if _MIPS_SIM == _MIPS_SIM_NABI32 746#if _MIPS_SIM == _MIPS_SIM_NABI32
745 747
@@ -1077,15 +1079,16 @@
1077#define __NR_pkey_mprotect (__NR_Linux + 327) 1079#define __NR_pkey_mprotect (__NR_Linux + 327)
1078#define __NR_pkey_alloc (__NR_Linux + 328) 1080#define __NR_pkey_alloc (__NR_Linux + 328)
1079#define __NR_pkey_free (__NR_Linux + 329) 1081#define __NR_pkey_free (__NR_Linux + 329)
1082#define __NR_statx (__NR_Linux + 330)
1080 1083
1081/* 1084/*
1082 * Offset of the last N32 flavoured syscall 1085 * Offset of the last N32 flavoured syscall
1083 */ 1086 */
1084#define __NR_Linux_syscalls 329 1087#define __NR_Linux_syscalls 330
1085 1088
1086#endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */ 1089#endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */
1087 1090
1088#define __NR_N32_Linux 6000 1091#define __NR_N32_Linux 6000
1089#define __NR_N32_Linux_syscalls 329 1092#define __NR_N32_Linux_syscalls 330
1090 1093
1091#endif /* _UAPI_ASM_UNISTD_H */ 1094#endif /* _UAPI_ASM_UNISTD_H */
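
Wiring a new syscall such as statx touches two things per ABI: the number and count in unistd.h, and a matching PTR entry appended to the call table (the scall*.S hunks below). If the count macro and the table length drift apart, the table is indexed past its end. A toy model of that invariant, with all names illustrative:

#include <stdio.h>

typedef long (*syscall_fn)(void);

static long sys_ni(void)    { return -1; }  /* stand-in for an unimplemented slot */
static long sys_statx(void) { return 0; }   /* stand-in for the new entry */

/* Last few entries of a toy table; upstream appends sys_statx the same
 * way for each of the o32/64/n32 tables. */
static syscall_fn call_table[] = { sys_ni, sys_ni, sys_statx };

#define NR_SYSCALLS (sizeof(call_table) / sizeof(call_table[0]))

int main(void)
{
        unsigned int nr = NR_SYSCALLS - 1;  /* model of __NR_statx's slot */

        /* The __NR_Linux_syscalls macro must track the table length
         * for this bounds check to mean anything. */
        if (nr < NR_SYSCALLS)
                printf("statx -> %ld\n", call_table[nr]());
        return 0;
}
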
diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
index bb5c5d34ba81..a670c0c11875 100644
--- a/arch/mips/kernel/asm-offsets.c
+++ b/arch/mips/kernel/asm-offsets.c
@@ -102,6 +102,7 @@ void output_thread_info_defines(void)
102 DEFINE(_THREAD_SIZE, THREAD_SIZE); 102 DEFINE(_THREAD_SIZE, THREAD_SIZE);
103 DEFINE(_THREAD_MASK, THREAD_MASK); 103 DEFINE(_THREAD_MASK, THREAD_MASK);
104 DEFINE(_IRQ_STACK_SIZE, IRQ_STACK_SIZE); 104 DEFINE(_IRQ_STACK_SIZE, IRQ_STACK_SIZE);
105 DEFINE(_IRQ_STACK_START, IRQ_STACK_START);
105 BLANK(); 106 BLANK();
106} 107}
107 108
diff --git a/arch/mips/kernel/cps-vec.S b/arch/mips/kernel/cps-vec.S
index 59476a607add..a00e87b0256d 100644
--- a/arch/mips/kernel/cps-vec.S
+++ b/arch/mips/kernel/cps-vec.S
@@ -361,7 +361,7 @@ LEAF(mips_cps_get_bootcfg)
361 END(mips_cps_get_bootcfg) 361 END(mips_cps_get_bootcfg)
362 362
363LEAF(mips_cps_boot_vpes) 363LEAF(mips_cps_boot_vpes)
364 PTR_L ta2, COREBOOTCFG_VPEMASK(a0) 364 lw ta2, COREBOOTCFG_VPEMASK(a0)
365 PTR_L ta3, COREBOOTCFG_VPECONFIG(a0) 365 PTR_L ta3, COREBOOTCFG_VPECONFIG(a0)
366 366
367#if defined(CONFIG_CPU_MIPSR6) 367#if defined(CONFIG_CPU_MIPSR6)
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index 07718bb5fc9d..12422fd4af23 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -1824,7 +1824,7 @@ static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu)
1824 } 1824 }
1825 1825
1826 decode_configs(c); 1826 decode_configs(c);
1827 c->options |= MIPS_CPU_TLBINV | MIPS_CPU_LDPTE; 1827 c->options |= MIPS_CPU_FTLB | MIPS_CPU_TLBINV | MIPS_CPU_LDPTE;
1828 c->writecombine = _CACHE_UNCACHED_ACCELERATED; 1828 c->writecombine = _CACHE_UNCACHED_ACCELERATED;
1829 break; 1829 break;
1830 default: 1830 default:
diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S
index 7ec9612cb007..ae810da4d499 100644
--- a/arch/mips/kernel/genex.S
+++ b/arch/mips/kernel/genex.S
@@ -215,9 +215,11 @@ NESTED(handle_int, PT_SIZE, sp)
215 beq t0, t1, 2f 215 beq t0, t1, 2f
216 216
217 /* Switch to IRQ stack */ 217 /* Switch to IRQ stack */
218 li t1, _IRQ_STACK_SIZE 218 li t1, _IRQ_STACK_START
219 PTR_ADD sp, t0, t1 219 PTR_ADD sp, t0, t1
220 220
221 /* Save task's sp on IRQ stack so that unwinding can follow it */
222 LONG_S s1, 0(sp)
2212: 2232:
222 jal plat_irq_dispatch 224 jal plat_irq_dispatch
223 225
@@ -325,9 +327,11 @@ NESTED(except_vec_vi_handler, 0, sp)
325 beq t0, t1, 2f 327 beq t0, t1, 2f
326 328
327 /* Switch to IRQ stack */ 329 /* Switch to IRQ stack */
328 li t1, _IRQ_STACK_SIZE 330 li t1, _IRQ_STACK_START
329 PTR_ADD sp, t0, t1 331 PTR_ADD sp, t0, t1
330 332
333 /* Save task's sp on IRQ stack so that unwinding can follow it */
334 LONG_S s1, 0(sp)
3312: 3352:
332 jalr v0 336 jalr v0
333 337
@@ -519,7 +523,7 @@ NESTED(nmi_handler, PT_SIZE, sp)
519 BUILD_HANDLER reserved reserved sti verbose /* others */ 523 BUILD_HANDLER reserved reserved sti verbose /* others */
520 524
521 .align 5 525 .align 5
522 LEAF(handle_ri_rdhwr_vivt) 526 LEAF(handle_ri_rdhwr_tlbp)
523 .set push 527 .set push
524 .set noat 528 .set noat
525 .set noreorder 529 .set noreorder
@@ -538,7 +542,7 @@ NESTED(nmi_handler, PT_SIZE, sp)
538 .set pop 542 .set pop
539 bltz k1, handle_ri /* slow path */ 543 bltz k1, handle_ri /* slow path */
540 /* fall thru */ 544 /* fall thru */
541 END(handle_ri_rdhwr_vivt) 545 END(handle_ri_rdhwr_tlbp)
542 546
543 LEAF(handle_ri_rdhwr) 547 LEAF(handle_ri_rdhwr)
544 .set push 548 .set push
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index fb6b6b650719..b68e10fc453d 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -488,31 +488,52 @@ unsigned long notrace unwind_stack_by_address(unsigned long stack_page,
488 unsigned long pc, 488 unsigned long pc,
489 unsigned long *ra) 489 unsigned long *ra)
490{ 490{
491 unsigned long low, high, irq_stack_high;
491 struct mips_frame_info info; 492 struct mips_frame_info info;
492 unsigned long size, ofs; 493 unsigned long size, ofs;
494 struct pt_regs *regs;
493 int leaf; 495 int leaf;
494 extern void ret_from_irq(void);
495 extern void ret_from_exception(void);
496 496
497 if (!stack_page) 497 if (!stack_page)
498 return 0; 498 return 0;
499 499
500 /* 500 /*
501 * If we reached the bottom of interrupt context, 501 * IRQ stacks start at IRQ_STACK_START
502 * return saved pc in pt_regs. 502 * task stacks at THREAD_SIZE - 32
503 */ 503 */
504 if (pc == (unsigned long)ret_from_irq || 504 low = stack_page;
505 pc == (unsigned long)ret_from_exception) { 505 if (!preemptible() && on_irq_stack(raw_smp_processor_id(), *sp)) {
506 struct pt_regs *regs; 506 high = stack_page + IRQ_STACK_START;
507 if (*sp >= stack_page && 507 irq_stack_high = high;
508 *sp + sizeof(*regs) <= stack_page + THREAD_SIZE - 32) { 508 } else {
509 regs = (struct pt_regs *)*sp; 509 high = stack_page + THREAD_SIZE - 32;
510 pc = regs->cp0_epc; 510 irq_stack_high = 0;
511 if (!user_mode(regs) && __kernel_text_address(pc)) { 511 }
512 *sp = regs->regs[29]; 512
513 *ra = regs->regs[31]; 513 /*
514 return pc; 514 * If we reached the top of the interrupt stack, start unwinding
515 } 515 * the interrupted task stack.
516 */
517 if (unlikely(*sp == irq_stack_high)) {
518 unsigned long task_sp = *(unsigned long *)*sp;
519
520 /*
521 * Check that the pointer saved in the IRQ stack head points to
522 * something within the stack of the current task
523 */
524 if (!object_is_on_stack((void *)task_sp))
525 return 0;
526
527 /*
 528 * Follow pointer to task's kernel stack frame where interrupted
529 * state was saved.
530 */
531 regs = (struct pt_regs *)task_sp;
532 pc = regs->cp0_epc;
533 if (!user_mode(regs) && __kernel_text_address(pc)) {
534 *sp = regs->regs[29];
535 *ra = regs->regs[31];
536 return pc;
516 } 537 }
517 return 0; 538 return 0;
518 } 539 }
@@ -533,8 +554,7 @@ unsigned long notrace unwind_stack_by_address(unsigned long stack_page,
533 if (leaf < 0) 554 if (leaf < 0)
534 return 0; 555 return 0;
535 556
536 if (*sp < stack_page || 557 if (*sp < low || *sp + info.frame_size > high)
537 *sp + info.frame_size > stack_page + THREAD_SIZE - 32)
538 return 0; 558 return 0;
539 559
540 if (leaf) 560 if (leaf)
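
The rewritten unwinder above picks its frame bounds from whichever stack sp currently sits on, and a hit on the IRQ stack's top slot hands the unwind off to the interrupted task stack. A compressed user-space sketch of the bounds selection, with THREAD_SIZE assumed:

#include <stdbool.h>
#include <stdio.h>

#define THREAD_SIZE     (16 * 1024)  /* assumption for illustration */
#define IRQ_STACK_START (THREAD_SIZE - sizeof(unsigned long))

/* Model of the bounds selection in unwind_stack_by_address(): frames on
 * the IRQ stack may reach up to IRQ_STACK_START, task-stack frames up
 * to THREAD_SIZE - 32. */
static void pick_bounds(unsigned long stack_page, bool on_irq_stack,
                        unsigned long *high, unsigned long *irq_stack_high)
{
        if (on_irq_stack) {
                *high = stack_page + IRQ_STACK_START;
                *irq_stack_high = *high;  /* top slot holds the task sp */
        } else {
                *high = stack_page + THREAD_SIZE - 32;
                *irq_stack_high = 0;      /* never matches a frame pointer */
        }
}

int main(void)
{
        unsigned long high, irq_high;

        pick_bounds(0x80000000ul, true, &high, &irq_high);
        printf("irq:  high=%#lx irq_high=%#lx\n", high, irq_high);
        pick_bounds(0x80000000ul, false, &high, &irq_high);
        printf("task: high=%#lx irq_high=%#lx\n", high, irq_high);
        return 0;
}
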
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index 339601267265..6931fe722a0b 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -456,7 +456,8 @@ static int fpr_set(struct task_struct *target,
456 &target->thread.fpu, 456 &target->thread.fpu,
457 0, sizeof(elf_fpregset_t)); 457 0, sizeof(elf_fpregset_t));
458 458
459 for (i = 0; i < NUM_FPU_REGS; i++) { 459 BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t));
460 for (i = 0; i < NUM_FPU_REGS && count >= sizeof(elf_fpreg_t); i++) {
460 err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, 461 err = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
461 &fpr_val, i * sizeof(elf_fpreg_t), 462 &fpr_val, i * sizeof(elf_fpreg_t),
462 (i + 1) * sizeof(elf_fpreg_t)); 463 (i + 1) * sizeof(elf_fpreg_t));
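
The fpr_set() fix bounds the copy-in loop by the remaining count, so a short write stops cleanly instead of walking user_regset_copyin() past the supplied buffer. A user-space model of the loop guard (types and sizes illustrative):

#include <stdio.h>
#include <string.h>

#define NUM_FPU_REGS 32
typedef unsigned long long elf_fpreg_t;   /* 8 bytes, as on 64-bit MIPS */

/* Model: consume at most 'count' bytes, one register at a time, and
 * stop as soon as a full register no longer fits. */
static int fpr_set_model(const void *ubuf, size_t count, elf_fpreg_t *fpr)
{
        size_t i;

        for (i = 0; i < NUM_FPU_REGS && count >= sizeof(elf_fpreg_t); i++) {
                memcpy(&fpr[i], (const char *)ubuf + i * sizeof(elf_fpreg_t),
                       sizeof(elf_fpreg_t));
                count -= sizeof(elf_fpreg_t);
        }
        return (int)i;  /* registers actually written */
}

int main(void)
{
        elf_fpreg_t fpr[NUM_FPU_REGS] = { 0 };
        unsigned char buf[20] = { 0 };  /* deliberately short: 2.5 registers */

        printf("wrote %d registers\n", fpr_set_model(buf, sizeof(buf), fpr));
        return 0;
}
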
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index c29d397eee86..80ed68b2c95e 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -600,3 +600,4 @@ EXPORT(sys_call_table)
600 PTR sys_pkey_mprotect 600 PTR sys_pkey_mprotect
601 PTR sys_pkey_alloc 601 PTR sys_pkey_alloc
602 PTR sys_pkey_free /* 4365 */ 602 PTR sys_pkey_free /* 4365 */
603 PTR sys_statx
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
index 0687f96ee912..49765b44aa9b 100644
--- a/arch/mips/kernel/scall64-64.S
+++ b/arch/mips/kernel/scall64-64.S
@@ -438,4 +438,5 @@ EXPORT(sys_call_table)
438 PTR sys_pkey_mprotect 438 PTR sys_pkey_mprotect
439 PTR sys_pkey_alloc 439 PTR sys_pkey_alloc
440 PTR sys_pkey_free /* 5325 */ 440 PTR sys_pkey_free /* 5325 */
441 PTR sys_statx
441 .size sys_call_table,.-sys_call_table 442 .size sys_call_table,.-sys_call_table
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index 0331ba39a065..90bad2d1b2d3 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -433,4 +433,5 @@ EXPORT(sysn32_call_table)
433 PTR sys_pkey_mprotect 433 PTR sys_pkey_mprotect
434 PTR sys_pkey_alloc 434 PTR sys_pkey_alloc
435 PTR sys_pkey_free 435 PTR sys_pkey_free
436 PTR sys_statx /* 6330 */
436 .size sysn32_call_table,.-sysn32_call_table 437 .size sysn32_call_table,.-sysn32_call_table
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index 5a47042dd25f..2dd70bd104e1 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -588,4 +588,5 @@ EXPORT(sys32_call_table)
588 PTR sys_pkey_mprotect 588 PTR sys_pkey_mprotect
589 PTR sys_pkey_alloc 589 PTR sys_pkey_alloc
590 PTR sys_pkey_free /* 4365 */ 590 PTR sys_pkey_free /* 4365 */
591 PTR sys_statx
591 .size sys32_call_table,.-sys32_call_table 592 .size sys32_call_table,.-sys32_call_table
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index c7d17cfb32f6..b49e7bf9f950 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -83,7 +83,7 @@ extern asmlinkage void handle_dbe(void);
83extern asmlinkage void handle_sys(void); 83extern asmlinkage void handle_sys(void);
84extern asmlinkage void handle_bp(void); 84extern asmlinkage void handle_bp(void);
85extern asmlinkage void handle_ri(void); 85extern asmlinkage void handle_ri(void);
86extern asmlinkage void handle_ri_rdhwr_vivt(void); 86extern asmlinkage void handle_ri_rdhwr_tlbp(void);
87extern asmlinkage void handle_ri_rdhwr(void); 87extern asmlinkage void handle_ri_rdhwr(void);
88extern asmlinkage void handle_cpu(void); 88extern asmlinkage void handle_cpu(void);
89extern asmlinkage void handle_ov(void); 89extern asmlinkage void handle_ov(void);
@@ -2408,9 +2408,18 @@ void __init trap_init(void)
2408 2408
2409 set_except_vector(EXCCODE_SYS, handle_sys); 2409 set_except_vector(EXCCODE_SYS, handle_sys);
2410 set_except_vector(EXCCODE_BP, handle_bp); 2410 set_except_vector(EXCCODE_BP, handle_bp);
2411 set_except_vector(EXCCODE_RI, rdhwr_noopt ? handle_ri : 2411
2412 (cpu_has_vtag_icache ? 2412 if (rdhwr_noopt)
2413 handle_ri_rdhwr_vivt : handle_ri_rdhwr)); 2413 set_except_vector(EXCCODE_RI, handle_ri);
2414 else {
2415 if (cpu_has_vtag_icache)
2416 set_except_vector(EXCCODE_RI, handle_ri_rdhwr_tlbp);
2417 else if (current_cpu_type() == CPU_LOONGSON3)
2418 set_except_vector(EXCCODE_RI, handle_ri_rdhwr_tlbp);
2419 else
2420 set_except_vector(EXCCODE_RI, handle_ri_rdhwr);
2421 }
2422
2414 set_except_vector(EXCCODE_CPU, handle_cpu); 2423 set_except_vector(EXCCODE_CPU, handle_cpu);
2415 set_except_vector(EXCCODE_OV, handle_ov); 2424 set_except_vector(EXCCODE_OV, handle_ov);
2416 set_except_vector(EXCCODE_TR, handle_tr); 2425 set_except_vector(EXCCODE_TR, handle_tr);
diff --git a/arch/mips/lantiq/xway/sysctrl.c b/arch/mips/lantiq/xway/sysctrl.c
index 3c3aa05891dd..95bec460b651 100644
--- a/arch/mips/lantiq/xway/sysctrl.c
+++ b/arch/mips/lantiq/xway/sysctrl.c
@@ -467,7 +467,7 @@ void __init ltq_soc_init(void)
467 467
468 if (!np_xbar) 468 if (!np_xbar)
469 panic("Failed to load xbar nodes from devicetree"); 469 panic("Failed to load xbar nodes from devicetree");
470 if (of_address_to_resource(np_pmu, 0, &res_xbar)) 470 if (of_address_to_resource(np_xbar, 0, &res_xbar))
471 panic("Failed to get xbar resources"); 471 panic("Failed to get xbar resources");
472 if (!request_mem_region(res_xbar.start, resource_size(&res_xbar), 472 if (!request_mem_region(res_xbar.start, resource_size(&res_xbar),
473 res_xbar.name)) 473 res_xbar.name))
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index e7f798d55fbc..3fe99cb271a9 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -1562,6 +1562,7 @@ static void probe_vcache(void)
1562 vcache_size = c->vcache.sets * c->vcache.ways * c->vcache.linesz; 1562 vcache_size = c->vcache.sets * c->vcache.ways * c->vcache.linesz;
1563 1563
1564 c->vcache.waybit = 0; 1564 c->vcache.waybit = 0;
1565 c->vcache.waysize = vcache_size / c->vcache.ways;
1565 1566
1566 pr_info("Unified victim cache %ldkB %s, linesize %d bytes.\n", 1567 pr_info("Unified victim cache %ldkB %s, linesize %d bytes.\n",
1567 vcache_size >> 10, way_string[c->vcache.ways], c->vcache.linesz); 1568 vcache_size >> 10, way_string[c->vcache.ways], c->vcache.linesz);
@@ -1664,6 +1665,7 @@ static void __init loongson3_sc_init(void)
1664 /* Loongson-3 has 4 cores, 1MB scache for each. scaches are shared */ 1665 /* Loongson-3 has 4 cores, 1MB scache for each. scaches are shared */
1665 scache_size *= 4; 1666 scache_size *= 4;
1666 c->scache.waybit = 0; 1667 c->scache.waybit = 0;
1668 c->scache.waysize = scache_size / c->scache.ways;
1667 pr_info("Unified secondary cache %ldkB %s, linesize %d bytes.\n", 1669 pr_info("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
1668 scache_size >> 10, way_string[c->scache.ways], c->scache.linesz); 1670 scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);
1669 if (scache_size) 1671 if (scache_size)
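
Both c-r4k.c hunks fill in the previously-missing waysize field, which the cache maintenance helpers divide the cache into. A worked example under the Loongson-3 comment above; the way count here is an assumption, since the real value is probed from the hardware:

#include <stdio.h>

int main(void)
{
        unsigned long scache_size = 1024 * 1024;  /* 1 MB per core, per the comment */
        unsigned int ways = 16;                   /* assumed for illustration */

        scache_size *= 4;                         /* 4 cores, caches shared */
        printf("waysize = %lu KB\n", scache_size / ways / 1024);  /* 256 KB */
        return 0;
}
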
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 9bfee8988eaf..4f642e07c2b1 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -760,7 +760,8 @@ static void build_huge_update_entries(u32 **p, unsigned int pte,
760static void build_huge_handler_tail(u32 **p, struct uasm_reloc **r, 760static void build_huge_handler_tail(u32 **p, struct uasm_reloc **r,
761 struct uasm_label **l, 761 struct uasm_label **l,
762 unsigned int pte, 762 unsigned int pte,
763 unsigned int ptr) 763 unsigned int ptr,
764 unsigned int flush)
764{ 765{
765#ifdef CONFIG_SMP 766#ifdef CONFIG_SMP
766 UASM_i_SC(p, pte, 0, ptr); 767 UASM_i_SC(p, pte, 0, ptr);
@@ -769,6 +770,22 @@ static void build_huge_handler_tail(u32 **p, struct uasm_reloc **r,
769#else 770#else
770 UASM_i_SW(p, pte, 0, ptr); 771 UASM_i_SW(p, pte, 0, ptr);
771#endif 772#endif
773 if (cpu_has_ftlb && flush) {
774 BUG_ON(!cpu_has_tlbinv);
775
776 UASM_i_MFC0(p, ptr, C0_ENTRYHI);
777 uasm_i_ori(p, ptr, ptr, MIPS_ENTRYHI_EHINV);
778 UASM_i_MTC0(p, ptr, C0_ENTRYHI);
779 build_tlb_write_entry(p, l, r, tlb_indexed);
780
781 uasm_i_xori(p, ptr, ptr, MIPS_ENTRYHI_EHINV);
782 UASM_i_MTC0(p, ptr, C0_ENTRYHI);
783 build_huge_update_entries(p, pte, ptr);
784 build_huge_tlb_write_entry(p, l, r, pte, tlb_random, 0);
785
786 return;
787 }
788
772 build_huge_update_entries(p, pte, ptr); 789 build_huge_update_entries(p, pte, ptr);
773 build_huge_tlb_write_entry(p, l, r, pte, tlb_indexed, 0); 790 build_huge_tlb_write_entry(p, l, r, pte, tlb_indexed, 0);
774} 791}
@@ -2199,7 +2216,7 @@ static void build_r4000_tlb_load_handler(void)
2199 uasm_l_tlbl_goaround2(&l, p); 2216 uasm_l_tlbl_goaround2(&l, p);
2200 } 2217 }
2201 uasm_i_ori(&p, wr.r1, wr.r1, (_PAGE_ACCESSED | _PAGE_VALID)); 2218 uasm_i_ori(&p, wr.r1, wr.r1, (_PAGE_ACCESSED | _PAGE_VALID));
2202 build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2); 2219 build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2, 1);
2203#endif 2220#endif
2204 2221
2205 uasm_l_nopage_tlbl(&l, p); 2222 uasm_l_nopage_tlbl(&l, p);
@@ -2254,7 +2271,7 @@ static void build_r4000_tlb_store_handler(void)
2254 build_tlb_probe_entry(&p); 2271 build_tlb_probe_entry(&p);
2255 uasm_i_ori(&p, wr.r1, wr.r1, 2272 uasm_i_ori(&p, wr.r1, wr.r1,
2256 _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY); 2273 _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
2257 build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2); 2274 build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2, 1);
2258#endif 2275#endif
2259 2276
2260 uasm_l_nopage_tlbs(&l, p); 2277 uasm_l_nopage_tlbs(&l, p);
@@ -2310,7 +2327,7 @@ static void build_r4000_tlb_modify_handler(void)
2310 build_tlb_probe_entry(&p); 2327 build_tlb_probe_entry(&p);
2311 uasm_i_ori(&p, wr.r1, wr.r1, 2328 uasm_i_ori(&p, wr.r1, wr.r1,
2312 _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY); 2329 _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
2313 build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2); 2330 build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2, 0);
2314#endif 2331#endif
2315 2332
2316 uasm_l_nopage_tlbm(&l, p); 2333 uasm_l_nopage_tlbm(&l, p);
diff --git a/arch/mips/ralink/rt3883.c b/arch/mips/ralink/rt3883.c
index c4ffd43d3996..48ce701557a4 100644
--- a/arch/mips/ralink/rt3883.c
+++ b/arch/mips/ralink/rt3883.c
@@ -35,7 +35,7 @@ static struct rt2880_pmx_func uartlite_func[] = { FUNC("uartlite", 0, 15, 2) };
35static struct rt2880_pmx_func jtag_func[] = { FUNC("jtag", 0, 17, 5) }; 35static struct rt2880_pmx_func jtag_func[] = { FUNC("jtag", 0, 17, 5) };
36static struct rt2880_pmx_func mdio_func[] = { FUNC("mdio", 0, 22, 2) }; 36static struct rt2880_pmx_func mdio_func[] = { FUNC("mdio", 0, 22, 2) };
37static struct rt2880_pmx_func lna_a_func[] = { FUNC("lna a", 0, 32, 3) }; 37static struct rt2880_pmx_func lna_a_func[] = { FUNC("lna a", 0, 32, 3) };
38static struct rt2880_pmx_func lna_g_func[] = { FUNC("lna a", 0, 35, 3) }; 38static struct rt2880_pmx_func lna_g_func[] = { FUNC("lna g", 0, 35, 3) };
39static struct rt2880_pmx_func pci_func[] = { 39static struct rt2880_pmx_func pci_func[] = {
40 FUNC("pci-dev", 0, 40, 32), 40 FUNC("pci-dev", 0, 40, 32),
41 FUNC("pci-host2", 1, 40, 32), 41 FUNC("pci-host2", 1, 40, 32),
@@ -43,7 +43,7 @@ static struct rt2880_pmx_func pci_func[] = {
43 FUNC("pci-fnc", 3, 40, 32) 43 FUNC("pci-fnc", 3, 40, 32)
44}; 44};
45static struct rt2880_pmx_func ge1_func[] = { FUNC("ge1", 0, 72, 12) }; 45static struct rt2880_pmx_func ge1_func[] = { FUNC("ge1", 0, 72, 12) };
46static struct rt2880_pmx_func ge2_func[] = { FUNC("ge1", 0, 84, 12) }; 46static struct rt2880_pmx_func ge2_func[] = { FUNC("ge2", 0, 84, 12) };
47 47
48static struct rt2880_pmx_group rt3883_pinmux_data[] = { 48static struct rt2880_pmx_group rt3883_pinmux_data[] = {
49 GRP("i2c", i2c_func, 1, RT3883_GPIO_MODE_I2C), 49 GRP("i2c", i2c_func, 1, RT3883_GPIO_MODE_I2C),
diff --git a/arch/nios2/kernel/prom.c b/arch/nios2/kernel/prom.c
index 367c5426157b..3901b80d4420 100644
--- a/arch/nios2/kernel/prom.c
+++ b/arch/nios2/kernel/prom.c
@@ -48,6 +48,13 @@ void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
48 return alloc_bootmem_align(size, align); 48 return alloc_bootmem_align(size, align);
49} 49}
50 50
51int __init early_init_dt_reserve_memory_arch(phys_addr_t base, phys_addr_t size,
52 bool nomap)
53{
54 reserve_bootmem(base, size, BOOTMEM_DEFAULT);
55 return 0;
56}
57
51void __init early_init_devtree(void *params) 58void __init early_init_devtree(void *params)
52{ 59{
53 __be32 *dtb = (u32 *)__dtb_start; 60 __be32 *dtb = (u32 *)__dtb_start;
diff --git a/arch/nios2/kernel/setup.c b/arch/nios2/kernel/setup.c
index 6e57ffa5db27..6044d9be28b4 100644
--- a/arch/nios2/kernel/setup.c
+++ b/arch/nios2/kernel/setup.c
@@ -201,6 +201,9 @@ void __init setup_arch(char **cmdline_p)
201 } 201 }
202#endif /* CONFIG_BLK_DEV_INITRD */ 202#endif /* CONFIG_BLK_DEV_INITRD */
203 203
204 early_init_fdt_reserve_self();
205 early_init_fdt_scan_reserved_mem();
206
204 unflatten_and_copy_device_tree(); 207 unflatten_and_copy_device_tree();
205 208
206 setup_cpuinfo(); 209 setup_cpuinfo();
diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
index edfbf9d6a6dd..8442727f28d2 100644
--- a/arch/parisc/include/asm/uaccess.h
+++ b/arch/parisc/include/asm/uaccess.h
@@ -65,6 +65,15 @@ struct exception_table_entry {
65 ".previous\n" 65 ".previous\n"
66 66
67/* 67/*
68 * ASM_EXCEPTIONTABLE_ENTRY_EFAULT() creates a special exception table entry
69 * (with lowest bit set) for which the fault handler in fixup_exception() will
 70 * load -EFAULT into %r8 for a read or write fault, and zero the target
71 * register in case of a read fault in get_user().
72 */
73#define ASM_EXCEPTIONTABLE_ENTRY_EFAULT( fault_addr, except_addr )\
74 ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr + 1)
75
76/*
68 * The page fault handler stores, in a per-cpu area, the following information 77 * The page fault handler stores, in a per-cpu area, the following information
69 * if a fixup routine is available. 78 * if a fixup routine is available.
70 */ 79 */
@@ -91,7 +100,7 @@ struct exception_data {
91#define __get_user(x, ptr) \ 100#define __get_user(x, ptr) \
92({ \ 101({ \
93 register long __gu_err __asm__ ("r8") = 0; \ 102 register long __gu_err __asm__ ("r8") = 0; \
94 register long __gu_val __asm__ ("r9") = 0; \ 103 register long __gu_val; \
95 \ 104 \
96 load_sr2(); \ 105 load_sr2(); \
97 switch (sizeof(*(ptr))) { \ 106 switch (sizeof(*(ptr))) { \
@@ -107,22 +116,23 @@ struct exception_data {
107}) 116})
108 117
109#define __get_user_asm(ldx, ptr) \ 118#define __get_user_asm(ldx, ptr) \
110 __asm__("\n1:\t" ldx "\t0(%%sr2,%2),%0\n\t" \ 119 __asm__("1: " ldx " 0(%%sr2,%2),%0\n" \
111 ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_get_user_skip_1)\ 120 "9:\n" \
121 ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
112 : "=r"(__gu_val), "=r"(__gu_err) \ 122 : "=r"(__gu_val), "=r"(__gu_err) \
113 : "r"(ptr), "1"(__gu_err) \ 123 : "r"(ptr), "1"(__gu_err));
114 : "r1");
115 124
116#if !defined(CONFIG_64BIT) 125#if !defined(CONFIG_64BIT)
117 126
118#define __get_user_asm64(ptr) \ 127#define __get_user_asm64(ptr) \
119 __asm__("\n1:\tldw 0(%%sr2,%2),%0" \ 128 __asm__(" copy %%r0,%R0\n" \
120 "\n2:\tldw 4(%%sr2,%2),%R0\n\t" \ 129 "1: ldw 0(%%sr2,%2),%0\n" \
121 ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_get_user_skip_2)\ 130 "2: ldw 4(%%sr2,%2),%R0\n" \
122 ASM_EXCEPTIONTABLE_ENTRY(2b, fixup_get_user_skip_1)\ 131 "9:\n" \
132 ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
133 ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b) \
123 : "=r"(__gu_val), "=r"(__gu_err) \ 134 : "=r"(__gu_val), "=r"(__gu_err) \
124 : "r"(ptr), "1"(__gu_err) \ 135 : "r"(ptr), "1"(__gu_err));
125 : "r1");
126 136
127#endif /* !defined(CONFIG_64BIT) */ 137#endif /* !defined(CONFIG_64BIT) */
128 138
@@ -148,32 +158,31 @@ struct exception_data {
148 * The "__put_user/kernel_asm()" macros tell gcc they read from memory 158 * The "__put_user/kernel_asm()" macros tell gcc they read from memory
149 * instead of writing. This is because they do not write to any memory 159 * instead of writing. This is because they do not write to any memory
150 * gcc knows about, so there are no aliasing issues. These macros must 160 * gcc knows about, so there are no aliasing issues. These macros must
151 * also be aware that "fixup_put_user_skip_[12]" are executed in the 161 * also be aware that fixups are executed in the context of the fault,
152 * context of the fault, and any registers used there must be listed 162 * and any registers used there must be listed as clobbers.
153 * as clobbers. In this case only "r1" is used by the current routines. 163 * r8 is already listed as err.
154 * r8/r9 are already listed as err/val.
155 */ 164 */
156 165
157#define __put_user_asm(stx, x, ptr) \ 166#define __put_user_asm(stx, x, ptr) \
158 __asm__ __volatile__ ( \ 167 __asm__ __volatile__ ( \
159 "\n1:\t" stx "\t%2,0(%%sr2,%1)\n\t" \ 168 "1: " stx " %2,0(%%sr2,%1)\n" \
160 ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_put_user_skip_1)\ 169 "9:\n" \
170 ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
161 : "=r"(__pu_err) \ 171 : "=r"(__pu_err) \
162 : "r"(ptr), "r"(x), "0"(__pu_err) \ 172 : "r"(ptr), "r"(x), "0"(__pu_err))
163 : "r1")
164 173
165 174
166#if !defined(CONFIG_64BIT) 175#if !defined(CONFIG_64BIT)
167 176
168#define __put_user_asm64(__val, ptr) do { \ 177#define __put_user_asm64(__val, ptr) do { \
169 __asm__ __volatile__ ( \ 178 __asm__ __volatile__ ( \
170 "\n1:\tstw %2,0(%%sr2,%1)" \ 179 "1: stw %2,0(%%sr2,%1)\n" \
171 "\n2:\tstw %R2,4(%%sr2,%1)\n\t" \ 180 "2: stw %R2,4(%%sr2,%1)\n" \
172 ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_put_user_skip_2)\ 181 "9:\n" \
173 ASM_EXCEPTIONTABLE_ENTRY(2b, fixup_put_user_skip_1)\ 182 ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
183 ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b) \
174 : "=r"(__pu_err) \ 184 : "=r"(__pu_err) \
175 : "r"(ptr), "r"(__val), "0"(__pu_err) \ 185 : "r"(ptr), "r"(__val), "0"(__pu_err)); \
176 : "r1"); \
177} while (0) 186} while (0)
178 187
179#endif /* !defined(CONFIG_64BIT) */ 188#endif /* !defined(CONFIG_64BIT) */
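
ASM_EXCEPTIONTABLE_ENTRY_EFAULT() encodes the "load -EFAULT and continue" behaviour in the low bit of the fixup address, which is what lets the dedicated fixup stubs be deleted further below. A user-space model of how a fault handler could decode such an entry; the struct layout is assumed for illustration:

#include <stdbool.h>
#include <stdio.h>

struct exception_table_entry {
        unsigned long insn;    /* address of the faulting instruction */
        unsigned long fixup;   /* continuation address; bit 0 = EFAULT flag */
};

/* Model of the macro: tag the continuation address with the flag bit. */
#define MODEL_EXTABLE_EFAULT(fault, cont) { (fault), (cont) + 1 }

static bool entry_wants_efault(const struct exception_table_entry *e)
{
        return e->fixup & 1ul;
}

static unsigned long entry_target(const struct exception_table_entry *e)
{
        return e->fixup & ~1ul;  /* strip the flag to recover the address */
}

int main(void)
{
        struct exception_table_entry e = MODEL_EXTABLE_EFAULT(0x1000ul, 0x2000ul);

        printf("efault=%d resume=%#lx\n", entry_wants_efault(&e), entry_target(&e));
        return 0;
}
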
diff --git a/arch/parisc/kernel/parisc_ksyms.c b/arch/parisc/kernel/parisc_ksyms.c
index 7484b3d11e0d..c6d6272a934f 100644
--- a/arch/parisc/kernel/parisc_ksyms.c
+++ b/arch/parisc/kernel/parisc_ksyms.c
@@ -47,16 +47,6 @@ EXPORT_SYMBOL(__cmpxchg_u64);
47EXPORT_SYMBOL(lclear_user); 47EXPORT_SYMBOL(lclear_user);
48EXPORT_SYMBOL(lstrnlen_user); 48EXPORT_SYMBOL(lstrnlen_user);
49 49
50/* Global fixups - defined as int to avoid creation of function pointers */
51extern int fixup_get_user_skip_1;
52extern int fixup_get_user_skip_2;
53extern int fixup_put_user_skip_1;
54extern int fixup_put_user_skip_2;
55EXPORT_SYMBOL(fixup_get_user_skip_1);
56EXPORT_SYMBOL(fixup_get_user_skip_2);
57EXPORT_SYMBOL(fixup_put_user_skip_1);
58EXPORT_SYMBOL(fixup_put_user_skip_2);
59
60#ifndef CONFIG_64BIT 50#ifndef CONFIG_64BIT
61/* Needed so insmod can set dp value */ 51/* Needed so insmod can set dp value */
62extern int $global$; 52extern int $global$;
diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c
index b76f503eee4a..4516a5b53f38 100644
--- a/arch/parisc/kernel/process.c
+++ b/arch/parisc/kernel/process.c
@@ -143,6 +143,8 @@ void machine_power_off(void)
143 printk(KERN_EMERG "System shut down completed.\n" 143 printk(KERN_EMERG "System shut down completed.\n"
144 "Please power this system off now."); 144 "Please power this system off now.");
145 145
 146 /* prevent soft lockup/stalled CPU messages for the endless loop. */
147 rcu_sysrq_start();
146 for (;;); 148 for (;;);
147} 149}
148 150
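
rcu_sysrq_start() suppresses RCU CPU-stall reports (its usual caller is the sysrq code, hence the name), so spinning forever after the shutdown message no longer trips the stall detector; the matching rcu_sysrq_end() is naturally never reached. The tail of machine_power_off() as patched, flattened here for clarity:

    printk(KERN_EMERG "System shut down completed.\n"
           "Please power this system off now.");

    rcu_sysrq_start();      /* silence RCU stall / soft-lockup reports */
    for (;;)
            ;               /* wait for the operator to remove power */
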
diff --git a/arch/parisc/lib/Makefile b/arch/parisc/lib/Makefile
index 8fa92b8d839a..f2dac4d73b1b 100644
--- a/arch/parisc/lib/Makefile
+++ b/arch/parisc/lib/Makefile
@@ -2,7 +2,7 @@
2# Makefile for parisc-specific library files 2# Makefile for parisc-specific library files
3# 3#
4 4
5lib-y := lusercopy.o bitops.o checksum.o io.o memset.o fixup.o memcpy.o \ 5lib-y := lusercopy.o bitops.o checksum.o io.o memset.o memcpy.o \
6 ucmpdi2.o delay.o 6 ucmpdi2.o delay.o
7 7
8obj-y := iomap.o 8obj-y := iomap.o
diff --git a/arch/parisc/lib/fixup.S b/arch/parisc/lib/fixup.S
deleted file mode 100644
index a5b72f22c7a6..000000000000
--- a/arch/parisc/lib/fixup.S
+++ /dev/null
@@ -1,98 +0,0 @@
1/*
2 * Linux/PA-RISC Project (http://www.parisc-linux.org/)
3 *
4 * Copyright (C) 2004 Randolph Chung <tausq@debian.org>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2, or (at your option)
9 * any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 *
20 * Fixup routines for kernel exception handling.
21 */
22#include <asm/asm-offsets.h>
23#include <asm/assembly.h>
24#include <asm/errno.h>
25#include <linux/linkage.h>
26
27#ifdef CONFIG_SMP
28 .macro get_fault_ip t1 t2
29 loadgp
30 addil LT%__per_cpu_offset,%r27
31 LDREG RT%__per_cpu_offset(%r1),\t1
32 /* t2 = smp_processor_id() */
33 mfctl 30,\t2
34 ldw TI_CPU(\t2),\t2
35#ifdef CONFIG_64BIT
36 extrd,u \t2,63,32,\t2
37#endif
38 /* t2 = &__per_cpu_offset[smp_processor_id()]; */
39 LDREGX \t2(\t1),\t2
40 addil LT%exception_data,%r27
41 LDREG RT%exception_data(%r1),\t1
42 /* t1 = this_cpu_ptr(&exception_data) */
43 add,l \t1,\t2,\t1
44 /* %r27 = t1->fault_gp - restore gp */
45 LDREG EXCDATA_GP(\t1), %r27
46 /* t1 = t1->fault_ip */
47 LDREG EXCDATA_IP(\t1), \t1
48 .endm
49#else
50 .macro get_fault_ip t1 t2
51 loadgp
52 /* t1 = this_cpu_ptr(&exception_data) */
53 addil LT%exception_data,%r27
54 LDREG RT%exception_data(%r1),\t2
55 /* %r27 = t2->fault_gp - restore gp */
56 LDREG EXCDATA_GP(\t2), %r27
57 /* t1 = t2->fault_ip */
58 LDREG EXCDATA_IP(\t2), \t1
59 .endm
60#endif
61
62 .level LEVEL
63
64 .text
65 .section .fixup, "ax"
66
67 /* get_user() fixups, store -EFAULT in r8, and 0 in r9 */
68ENTRY_CFI(fixup_get_user_skip_1)
69 get_fault_ip %r1,%r8
70 ldo 4(%r1), %r1
71 ldi -EFAULT, %r8
72 bv %r0(%r1)
73 copy %r0, %r9
74ENDPROC_CFI(fixup_get_user_skip_1)
75
76ENTRY_CFI(fixup_get_user_skip_2)
77 get_fault_ip %r1,%r8
78 ldo 8(%r1), %r1
79 ldi -EFAULT, %r8
80 bv %r0(%r1)
81 copy %r0, %r9
82ENDPROC_CFI(fixup_get_user_skip_2)
83
84 /* put_user() fixups, store -EFAULT in r8 */
85ENTRY_CFI(fixup_put_user_skip_1)
86 get_fault_ip %r1,%r8
87 ldo 4(%r1), %r1
88 bv %r0(%r1)
89 ldi -EFAULT, %r8
90ENDPROC_CFI(fixup_put_user_skip_1)
91
92ENTRY_CFI(fixup_put_user_skip_2)
93 get_fault_ip %r1,%r8
94 ldo 8(%r1), %r1
95 bv %r0(%r1)
96 ldi -EFAULT, %r8
97ENDPROC_CFI(fixup_put_user_skip_2)
98
diff --git a/arch/parisc/lib/lusercopy.S b/arch/parisc/lib/lusercopy.S
index 56845de6b5df..85c28bb80fb7 100644
--- a/arch/parisc/lib/lusercopy.S
+++ b/arch/parisc/lib/lusercopy.S
@@ -5,6 +5,8 @@
5 * Copyright (C) 2000 Richard Hirst <rhirst with parisc-linux.org> 5 * Copyright (C) 2000 Richard Hirst <rhirst with parisc-linux.org>
6 * Copyright (C) 2001 Matthieu Delahaye <delahaym at esiee.fr> 6 * Copyright (C) 2001 Matthieu Delahaye <delahaym at esiee.fr>
7 * Copyright (C) 2003 Randolph Chung <tausq with parisc-linux.org> 7 * Copyright (C) 2003 Randolph Chung <tausq with parisc-linux.org>
8 * Copyright (C) 2017 Helge Deller <deller@gmx.de>
9 * Copyright (C) 2017 John David Anglin <dave.anglin@bell.net>
8 * 10 *
9 * 11 *
10 * This program is free software; you can redistribute it and/or modify 12 * This program is free software; you can redistribute it and/or modify
@@ -132,4 +134,321 @@ ENDPROC_CFI(lstrnlen_user)
132 134
133 .procend 135 .procend
134 136
137
138
139/*
140 * unsigned long pa_memcpy(void *dstp, const void *srcp, unsigned long len)
141 *
142 * Inputs:
143 * - sr1 already contains space of source region
144 * - sr2 already contains space of destination region
145 *
146 * Returns:
147 * - number of bytes that could not be copied.
148 * On success, this will be zero.
149 *
150 * This code is based on a C implementation of a copy routine written by
151 * Randolph Chung, which in turn was derived from glibc.
152 *
153 * Several strategies are tried to get the best performance for various
154 * conditions. In the optimal case, we copy in loops that move 32 or 16 bytes
155 * at a time using general registers. Unaligned copies are handled either by
156 * aligning the destination and then using the shift-and-write method, or in
157 * a few cases by falling back to a byte-at-a-time copy.
158 *
159 * Testing with various alignments and buffer sizes shows that this code is
160 * often >10x faster than a simple byte-at-a-time copy, even for strangely
161 * aligned operands. It is interesting to note that the glibc version of memcpy
162 * (written in C) is actually quite fast already. This routine is able to beat
163 * it by 30-40% for aligned copies because of the loop unrolling, but in some
164 * cases the glibc version is still slightly faster. This lends more
165 * credibility to the idea that gcc can generate very good code if we are careful.
166 *
167 * Possible optimizations:
168 * - add cache prefetching
169 * - try not to use the post-increment address modifiers; they may create
170 * additional interlocks. The assumption is that those were only efficient
171 * on old machines (pre-PA8000 processors).
172 */
173
174 dst = arg0
175 src = arg1
176 len = arg2
177 end = arg3
178 t1 = r19
179 t2 = r20
180 t3 = r21
181 t4 = r22
182 srcspc = sr1
183 dstspc = sr2
184
185 t0 = r1
186 a1 = t1
187 a2 = t2
188 a3 = t3
189 a0 = t4
190
191 save_src = ret0
192 save_dst = ret1
193 save_len = r31
194
195ENTRY_CFI(pa_memcpy)
196 .proc
197 .callinfo NO_CALLS
198 .entry
199
200 /* Last destination address */
201 add dst,len,end
202
203 /* short copy with less than 16 bytes? */
204 cmpib,COND(>>=),n 15,len,.Lbyte_loop
205
206 /* same alignment? */
207 xor src,dst,t0
208 extru t0,31,2,t1
209 cmpib,<>,n 0,t1,.Lunaligned_copy
210
211#ifdef CONFIG_64BIT
212 /* only do 64-bit copies if we can get aligned. */
213 extru t0,31,3,t1
214 cmpib,<>,n 0,t1,.Lalign_loop32
215
216 /* loop until we are 64-bit aligned */
217.Lalign_loop64:
218 extru dst,31,3,t1
219 cmpib,=,n 0,t1,.Lcopy_loop_16_start
22020: ldb,ma 1(srcspc,src),t1
22121: stb,ma t1,1(dstspc,dst)
222 b .Lalign_loop64
223 ldo -1(len),len
224
225 ASM_EXCEPTIONTABLE_ENTRY(20b,.Lcopy_done)
226 ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done)
227
228.Lcopy_loop_16_start:
229 ldi 31,t0
230.Lcopy_loop_16:
231 cmpb,COND(>>=),n t0,len,.Lword_loop
232
23310: ldd 0(srcspc,src),t1
23411: ldd 8(srcspc,src),t2
235 ldo 16(src),src
23612: std,ma t1,8(dstspc,dst)
23713: std,ma t2,8(dstspc,dst)
23814: ldd 0(srcspc,src),t1
23915: ldd 8(srcspc,src),t2
240 ldo 16(src),src
24116: std,ma t1,8(dstspc,dst)
24217: std,ma t2,8(dstspc,dst)
243
244 ASM_EXCEPTIONTABLE_ENTRY(10b,.Lcopy_done)
245 ASM_EXCEPTIONTABLE_ENTRY(11b,.Lcopy16_fault)
246 ASM_EXCEPTIONTABLE_ENTRY(12b,.Lcopy_done)
247 ASM_EXCEPTIONTABLE_ENTRY(13b,.Lcopy_done)
248 ASM_EXCEPTIONTABLE_ENTRY(14b,.Lcopy_done)
249 ASM_EXCEPTIONTABLE_ENTRY(15b,.Lcopy16_fault)
250 ASM_EXCEPTIONTABLE_ENTRY(16b,.Lcopy_done)
251 ASM_EXCEPTIONTABLE_ENTRY(17b,.Lcopy_done)
252
253 b .Lcopy_loop_16
254 ldo -32(len),len
255
256.Lword_loop:
257 cmpib,COND(>>=),n 3,len,.Lbyte_loop
25820: ldw,ma 4(srcspc,src),t1
25921: stw,ma t1,4(dstspc,dst)
260 b .Lword_loop
261 ldo -4(len),len
262
263 ASM_EXCEPTIONTABLE_ENTRY(20b,.Lcopy_done)
264 ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done)
265
266#endif /* CONFIG_64BIT */
267
268 /* loop until we are 32-bit aligned */
269.Lalign_loop32:
270 extru dst,31,2,t1
271 cmpib,=,n 0,t1,.Lcopy_loop_8
27220: ldb,ma 1(srcspc,src),t1
27321: stb,ma t1,1(dstspc,dst)
274 b .Lalign_loop32
275 ldo -1(len),len
276
277 ASM_EXCEPTIONTABLE_ENTRY(20b,.Lcopy_done)
278 ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done)
279
280
281.Lcopy_loop_8:
282 cmpib,COND(>>=),n 15,len,.Lbyte_loop
283
28410: ldw 0(srcspc,src),t1
28511: ldw 4(srcspc,src),t2
28612: stw,ma t1,4(dstspc,dst)
28713: stw,ma t2,4(dstspc,dst)
28814: ldw 8(srcspc,src),t1
28915: ldw 12(srcspc,src),t2
290 ldo 16(src),src
29116: stw,ma t1,4(dstspc,dst)
29217: stw,ma t2,4(dstspc,dst)
293
294 ASM_EXCEPTIONTABLE_ENTRY(10b,.Lcopy_done)
295 ASM_EXCEPTIONTABLE_ENTRY(11b,.Lcopy8_fault)
296 ASM_EXCEPTIONTABLE_ENTRY(12b,.Lcopy_done)
297 ASM_EXCEPTIONTABLE_ENTRY(13b,.Lcopy_done)
298 ASM_EXCEPTIONTABLE_ENTRY(14b,.Lcopy_done)
299 ASM_EXCEPTIONTABLE_ENTRY(15b,.Lcopy8_fault)
300 ASM_EXCEPTIONTABLE_ENTRY(16b,.Lcopy_done)
301 ASM_EXCEPTIONTABLE_ENTRY(17b,.Lcopy_done)
302
303 b .Lcopy_loop_8
304 ldo -16(len),len
305
306.Lbyte_loop:
307 cmpclr,COND(<>) len,%r0,%r0
308 b,n .Lcopy_done
30920: ldb 0(srcspc,src),t1
310 ldo 1(src),src
31121: stb,ma t1,1(dstspc,dst)
312 b .Lbyte_loop
313 ldo -1(len),len
314
315 ASM_EXCEPTIONTABLE_ENTRY(20b,.Lcopy_done)
316 ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done)
317
318.Lcopy_done:
319 bv %r0(%r2)
320 sub end,dst,ret0
321
322
323 /* src and dst are not aligned the same way. */
324 /* need to go the hard way */
325.Lunaligned_copy:
326 /* align until dst is 32bit-word-aligned */
327 extru dst,31,2,t1
328 cmpib,=,n 0,t1,.Lcopy_dstaligned
32920: ldb 0(srcspc,src),t1
330 ldo 1(src),src
33121: stb,ma t1,1(dstspc,dst)
332 b .Lunaligned_copy
333 ldo -1(len),len
334
335 ASM_EXCEPTIONTABLE_ENTRY(20b,.Lcopy_done)
336 ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done)
337
338.Lcopy_dstaligned:
339
340 /* store src, dst and len in safe place */
341 copy src,save_src
342 copy dst,save_dst
343 copy len,save_len
344
345 /* len now needs to give the number of words to copy */
346 SHRREG len,2,len
347
348 /*
349 * Copy from a not-aligned src to an aligned dst using shifts.
350 * Handles 4 words per loop.
351 */
352
353 depw,z src,28,2,t0
354 subi 32,t0,t0
355 mtsar t0
356 extru len,31,2,t0
357 cmpib,= 2,t0,.Lcase2
358 /* Make src aligned by rounding it down. */
359 depi 0,31,2,src
360
361 cmpiclr,<> 3,t0,%r0
362 b,n .Lcase3
363 cmpiclr,<> 1,t0,%r0
364 b,n .Lcase1
365.Lcase0:
366 cmpb,COND(=) %r0,len,.Lcda_finish
367 nop
368
3691: ldw,ma 4(srcspc,src), a3
370 ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
3711: ldw,ma 4(srcspc,src), a0
372 ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
373 b,n .Ldo3
374.Lcase1:
3751: ldw,ma 4(srcspc,src), a2
376 ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
3771: ldw,ma 4(srcspc,src), a3
378 ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
379 ldo -1(len),len
380 cmpb,COND(=),n %r0,len,.Ldo0
381.Ldo4:
3821: ldw,ma 4(srcspc,src), a0
383 ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
384 shrpw a2, a3, %sar, t0
3851: stw,ma t0, 4(dstspc,dst)
386 ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcopy_done)
387.Ldo3:
3881: ldw,ma 4(srcspc,src), a1
389 ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
390 shrpw a3, a0, %sar, t0
3911: stw,ma t0, 4(dstspc,dst)
392 ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcopy_done)
393.Ldo2:
3941: ldw,ma 4(srcspc,src), a2
395 ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
396 shrpw a0, a1, %sar, t0
3971: stw,ma t0, 4(dstspc,dst)
398 ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcopy_done)
399.Ldo1:
4001: ldw,ma 4(srcspc,src), a3
401 ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
402 shrpw a1, a2, %sar, t0
4031: stw,ma t0, 4(dstspc,dst)
404 ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcopy_done)
405 ldo -4(len),len
406 cmpb,COND(<>) %r0,len,.Ldo4
407 nop
408.Ldo0:
409 shrpw a2, a3, %sar, t0
4101: stw,ma t0, 4(dstspc,dst)
411 ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcopy_done)
412
413.Lcda_rdfault:
414.Lcda_finish:
415 /* calculate new src, dst and len and jump to byte-copy loop */
416 sub dst,save_dst,t0
417 add save_src,t0,src
418 b .Lbyte_loop
419 sub save_len,t0,len
420
421.Lcase3:
4221: ldw,ma 4(srcspc,src), a0
423 ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
4241: ldw,ma 4(srcspc,src), a1
425 ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
426 b .Ldo2
427 ldo 1(len),len
428.Lcase2:
4291: ldw,ma 4(srcspc,src), a1
430 ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
4311: ldw,ma 4(srcspc,src), a2
432 ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
433 b .Ldo1
434 ldo 2(len),len
435
436
437 /* fault exception fixup handlers: */
438#ifdef CONFIG_64BIT
439.Lcopy16_fault:
440 b .Lcopy_done
44110: std,ma t1,8(dstspc,dst)
442 ASM_EXCEPTIONTABLE_ENTRY(10b,.Lcopy_done)
443#endif
444
445.Lcopy8_fault:
446 b .Lcopy_done
44710: stw,ma t1,4(dstspc,dst)
448 ASM_EXCEPTIONTABLE_ENTRY(10b,.Lcopy_done)
449
450 .exit
451ENDPROC_CFI(pa_memcpy)
452 .procend
453
135 .end 454 .end
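
The .Lcase*/.Ldo* blocks above are the shift-and-write strategy from the header comment: two source words stay live in registers while shrpw funnel-shifts them through %sar into each aligned destination word. A rough C model of that inner loop -- illustrative only, assuming big-endian 32-bit words and a source that is not word-aligned, which is the only case the unaligned path handles:

    #include <stdint.h>

    /* Copy nwords 32-bit words to a word-aligned dst from a misaligned src.
     * Like the assembly, this only ever issues aligned word reads and
     * merges adjacent pairs, mirroring shrpw a,b,%sar,t. */
    static void copy_shift_merge(uint32_t *dst, const uint8_t *src,
                                 unsigned long nwords)
    {
            unsigned int sh = 8 * ((uintptr_t)src % 4);     /* 8, 16 or 24 */
            const uint32_t *ws = (const uint32_t *)((uintptr_t)src & ~3UL);
            uint32_t a = *ws++;                             /* preload */

            while (nwords--) {
                    uint32_t b = *ws++;
                    /* low bytes of a become the high bytes of the result */
                    *dst++ = (a << sh) | (b >> (32 - sh));
                    a = b;
            }
    }

As in the assembly, each output word is assembled from the two aligned source words that straddle it, so a faulting load can be mapped back to how many bytes were safely copied.
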
diff --git a/arch/parisc/lib/memcpy.c b/arch/parisc/lib/memcpy.c
index f82ff10ed974..b3d47ec1d80a 100644
--- a/arch/parisc/lib/memcpy.c
+++ b/arch/parisc/lib/memcpy.c
@@ -2,7 +2,7 @@
2 * Optimized memory copy routines. 2 * Optimized memory copy routines.
3 * 3 *
4 * Copyright (C) 2004 Randolph Chung <tausq@debian.org> 4 * Copyright (C) 2004 Randolph Chung <tausq@debian.org>
5 * Copyright (C) 2013 Helge Deller <deller@gmx.de> 5 * Copyright (C) 2013-2017 Helge Deller <deller@gmx.de>
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by 8 * it under the terms of the GNU General Public License as published by
@@ -21,474 +21,21 @@
21 * Portions derived from the GNU C Library 21 * Portions derived from the GNU C Library
22 * Copyright (C) 1991, 1997, 2003 Free Software Foundation, Inc. 22 * Copyright (C) 1991, 1997, 2003 Free Software Foundation, Inc.
23 * 23 *
24 * Several strategies are tried to try to get the best performance for various
25 * conditions. In the optimal case, we copy 64-bytes in an unrolled loop using
26 * fp regs. This is followed by loops that copy 32- or 16-bytes at a time using
27 * general registers. Unaligned copies are handled either by aligning the
28 * destination and then using shift-and-write method, or in a few cases by
29 * falling back to a byte-at-a-time copy.
30 *
31 * I chose to implement this in C because it is easier to maintain and debug,
32 * and in my experiments it appears that the C code generated by gcc (3.3/3.4
33 * at the time of writing) is fairly optimal. Unfortunately some of the
34 * semantics of the copy routine (exception handling) is difficult to express
35 * in C, so we have to play some tricks to get it to work.
36 *
37 * All the loads and stores are done via explicit asm() code in order to use
38 * the right space registers.
39 *
40 * Testing with various alignments and buffer sizes shows that this code is
41 * often >10x faster than a simple byte-at-a-time copy, even for strangely
42 * aligned operands. It is interesting to note that the glibc version
43 * of memcpy (written in C) is actually quite fast already. This routine is
44 * able to beat it by 30-40% for aligned copies because of the loop unrolling,
45 * but in some cases the glibc version is still slightly faster. This lends
46 * more credibility that gcc can generate very good code as long as we are
47 * careful.
48 *
49 * TODO:
50 * - cache prefetching needs more experimentation to get optimal settings
51 * - try not to use the post-increment address modifiers; they create additional
52 * interlocks
53 * - replace byte-copy loops with stybs sequences
54 */ 24 */
55 25
56#ifdef __KERNEL__
57#include <linux/module.h> 26#include <linux/module.h>
58#include <linux/compiler.h> 27#include <linux/compiler.h>
59#include <linux/uaccess.h> 28#include <linux/uaccess.h>
60#define s_space "%%sr1"
61#define d_space "%%sr2"
62#else
63#include "memcpy.h"
64#define s_space "%%sr0"
65#define d_space "%%sr0"
66#define pa_memcpy new2_copy
67#endif
68 29
69DECLARE_PER_CPU(struct exception_data, exception_data); 30DECLARE_PER_CPU(struct exception_data, exception_data);
70 31
71#define preserve_branch(label) do { \
72 volatile int dummy = 0; \
73 /* The following branch is never taken, it's just here to */ \
74 /* prevent gcc from optimizing away our exception code. */ \
75 if (unlikely(dummy != dummy)) \
76 goto label; \
77} while (0)
78
79#define get_user_space() (segment_eq(get_fs(), KERNEL_DS) ? 0 : mfsp(3)) 32#define get_user_space() (segment_eq(get_fs(), KERNEL_DS) ? 0 : mfsp(3))
80#define get_kernel_space() (0) 33#define get_kernel_space() (0)
81 34
82#define MERGE(w0, sh_1, w1, sh_2) ({ \
83 unsigned int _r; \
84 asm volatile ( \
85 "mtsar %3\n" \
86 "shrpw %1, %2, %%sar, %0\n" \
87 : "=r"(_r) \
88 : "r"(w0), "r"(w1), "r"(sh_2) \
89 ); \
90 _r; \
91})
92#define THRESHOLD 16
93
94#ifdef DEBUG_MEMCPY
95#define DPRINTF(fmt, args...) do { printk(KERN_DEBUG "%s:%d:%s ", __FILE__, __LINE__, __func__ ); printk(KERN_DEBUG fmt, ##args ); } while (0)
96#else
97#define DPRINTF(fmt, args...)
98#endif
99
100#define def_load_ai_insn(_insn,_sz,_tt,_s,_a,_t,_e) \
101 __asm__ __volatile__ ( \
102 "1:\t" #_insn ",ma " #_sz "(" _s ",%1), %0\n\t" \
103 ASM_EXCEPTIONTABLE_ENTRY(1b,_e) \
104 : _tt(_t), "+r"(_a) \
105 : \
106 : "r8")
107
108#define def_store_ai_insn(_insn,_sz,_tt,_s,_a,_t,_e) \
109 __asm__ __volatile__ ( \
110 "1:\t" #_insn ",ma %1, " #_sz "(" _s ",%0)\n\t" \
111 ASM_EXCEPTIONTABLE_ENTRY(1b,_e) \
112 : "+r"(_a) \
113 : _tt(_t) \
114 : "r8")
115
116#define ldbma(_s, _a, _t, _e) def_load_ai_insn(ldbs,1,"=r",_s,_a,_t,_e)
117#define stbma(_s, _t, _a, _e) def_store_ai_insn(stbs,1,"r",_s,_a,_t,_e)
118#define ldwma(_s, _a, _t, _e) def_load_ai_insn(ldw,4,"=r",_s,_a,_t,_e)
119#define stwma(_s, _t, _a, _e) def_store_ai_insn(stw,4,"r",_s,_a,_t,_e)
120#define flddma(_s, _a, _t, _e) def_load_ai_insn(fldd,8,"=f",_s,_a,_t,_e)
121#define fstdma(_s, _t, _a, _e) def_store_ai_insn(fstd,8,"f",_s,_a,_t,_e)
122
123#define def_load_insn(_insn,_tt,_s,_o,_a,_t,_e) \
124 __asm__ __volatile__ ( \
125 "1:\t" #_insn " " #_o "(" _s ",%1), %0\n\t" \
126 ASM_EXCEPTIONTABLE_ENTRY(1b,_e) \
127 : _tt(_t) \
128 : "r"(_a) \
129 : "r8")
130
131#define def_store_insn(_insn,_tt,_s,_t,_o,_a,_e) \
132 __asm__ __volatile__ ( \
133 "1:\t" #_insn " %0, " #_o "(" _s ",%1)\n\t" \
134 ASM_EXCEPTIONTABLE_ENTRY(1b,_e) \
135 : \
136 : _tt(_t), "r"(_a) \
137 : "r8")
138
139#define ldw(_s,_o,_a,_t,_e) def_load_insn(ldw,"=r",_s,_o,_a,_t,_e)
140#define stw(_s,_t,_o,_a,_e) def_store_insn(stw,"r",_s,_t,_o,_a,_e)
141
142#ifdef CONFIG_PREFETCH
143static inline void prefetch_src(const void *addr)
144{
145 __asm__("ldw 0(" s_space ",%0), %%r0" : : "r" (addr));
146}
147
148static inline void prefetch_dst(const void *addr)
149{
150 __asm__("ldd 0(" d_space ",%0), %%r0" : : "r" (addr));
151}
152#else
153#define prefetch_src(addr) do { } while(0)
154#define prefetch_dst(addr) do { } while(0)
155#endif
156
157#define PA_MEMCPY_OK 0
158#define PA_MEMCPY_LOAD_ERROR 1
159#define PA_MEMCPY_STORE_ERROR 2
160
161/* Copy from a not-aligned src to an aligned dst, using shifts. Handles 4 words
162 * per loop. This code is derived from glibc.
163 */
164static noinline unsigned long copy_dstaligned(unsigned long dst,
165 unsigned long src, unsigned long len)
166{
167 /* gcc complains that a2 and a3 may be uninitialized, but actually
168 * they cannot be. Initialize a2/a3 to shut gcc up.
169 */
170 register unsigned int a0, a1, a2 = 0, a3 = 0;
171 int sh_1, sh_2;
172
173 /* prefetch_src((const void *)src); */
174
175 /* Calculate how to shift a word read at the memory operation
176 aligned srcp to make it aligned for copy. */
177 sh_1 = 8 * (src % sizeof(unsigned int));
178 sh_2 = 8 * sizeof(unsigned int) - sh_1;
179
180 /* Make src aligned by rounding it down. */
181 src &= -sizeof(unsigned int);
182
183 switch (len % 4)
184 {
185 case 2:
186 /* a1 = ((unsigned int *) src)[0];
187 a2 = ((unsigned int *) src)[1]; */
188 ldw(s_space, 0, src, a1, cda_ldw_exc);
189 ldw(s_space, 4, src, a2, cda_ldw_exc);
190 src -= 1 * sizeof(unsigned int);
191 dst -= 3 * sizeof(unsigned int);
192 len += 2;
193 goto do1;
194 case 3:
195 /* a0 = ((unsigned int *) src)[0];
196 a1 = ((unsigned int *) src)[1]; */
197 ldw(s_space, 0, src, a0, cda_ldw_exc);
198 ldw(s_space, 4, src, a1, cda_ldw_exc);
199 src -= 0 * sizeof(unsigned int);
200 dst -= 2 * sizeof(unsigned int);
201 len += 1;
202 goto do2;
203 case 0:
204 if (len == 0)
205 return PA_MEMCPY_OK;
206 /* a3 = ((unsigned int *) src)[0];
207 a0 = ((unsigned int *) src)[1]; */
208 ldw(s_space, 0, src, a3, cda_ldw_exc);
209 ldw(s_space, 4, src, a0, cda_ldw_exc);
210 src -=-1 * sizeof(unsigned int);
211 dst -= 1 * sizeof(unsigned int);
212 len += 0;
213 goto do3;
214 case 1:
215 /* a2 = ((unsigned int *) src)[0];
216 a3 = ((unsigned int *) src)[1]; */
217 ldw(s_space, 0, src, a2, cda_ldw_exc);
218 ldw(s_space, 4, src, a3, cda_ldw_exc);
219 src -=-2 * sizeof(unsigned int);
220 dst -= 0 * sizeof(unsigned int);
221 len -= 1;
222 if (len == 0)
223 goto do0;
224 goto do4; /* No-op. */
225 }
226
227 do
228 {
229 /* prefetch_src((const void *)(src + 4 * sizeof(unsigned int))); */
230do4:
231 /* a0 = ((unsigned int *) src)[0]; */
232 ldw(s_space, 0, src, a0, cda_ldw_exc);
233 /* ((unsigned int *) dst)[0] = MERGE (a2, sh_1, a3, sh_2); */
234 stw(d_space, MERGE (a2, sh_1, a3, sh_2), 0, dst, cda_stw_exc);
235do3:
236 /* a1 = ((unsigned int *) src)[1]; */
237 ldw(s_space, 4, src, a1, cda_ldw_exc);
238 /* ((unsigned int *) dst)[1] = MERGE (a3, sh_1, a0, sh_2); */
239 stw(d_space, MERGE (a3, sh_1, a0, sh_2), 4, dst, cda_stw_exc);
240do2:
241 /* a2 = ((unsigned int *) src)[2]; */
242 ldw(s_space, 8, src, a2, cda_ldw_exc);
243 /* ((unsigned int *) dst)[2] = MERGE (a0, sh_1, a1, sh_2); */
244 stw(d_space, MERGE (a0, sh_1, a1, sh_2), 8, dst, cda_stw_exc);
245do1:
246 /* a3 = ((unsigned int *) src)[3]; */
247 ldw(s_space, 12, src, a3, cda_ldw_exc);
248 /* ((unsigned int *) dst)[3] = MERGE (a1, sh_1, a2, sh_2); */
249 stw(d_space, MERGE (a1, sh_1, a2, sh_2), 12, dst, cda_stw_exc);
250
251 src += 4 * sizeof(unsigned int);
252 dst += 4 * sizeof(unsigned int);
253 len -= 4;
254 }
255 while (len != 0);
256
257do0:
258 /* ((unsigned int *) dst)[0] = MERGE (a2, sh_1, a3, sh_2); */
259 stw(d_space, MERGE (a2, sh_1, a3, sh_2), 0, dst, cda_stw_exc);
260
261 preserve_branch(handle_load_error);
262 preserve_branch(handle_store_error);
263
264 return PA_MEMCPY_OK;
265
266handle_load_error:
267 __asm__ __volatile__ ("cda_ldw_exc:\n");
268 return PA_MEMCPY_LOAD_ERROR;
269
270handle_store_error:
271 __asm__ __volatile__ ("cda_stw_exc:\n");
272 return PA_MEMCPY_STORE_ERROR;
273}
274
275
276/* Returns PA_MEMCPY_OK, PA_MEMCPY_LOAD_ERROR or PA_MEMCPY_STORE_ERROR.
277 * In case of an access fault the faulty address can be read from the per_cpu
278 * exception data struct. */
279static noinline unsigned long pa_memcpy_internal(void *dstp, const void *srcp,
280 unsigned long len)
281{
282 register unsigned long src, dst, t1, t2, t3;
283 register unsigned char *pcs, *pcd;
284 register unsigned int *pws, *pwd;
285 register double *pds, *pdd;
286 unsigned long ret;
287
288 src = (unsigned long)srcp;
289 dst = (unsigned long)dstp;
290 pcs = (unsigned char *)srcp;
291 pcd = (unsigned char *)dstp;
292
293 /* prefetch_src((const void *)srcp); */
294
295 if (len < THRESHOLD)
296 goto byte_copy;
297
298 /* Check alignment */
299 t1 = (src ^ dst);
300 if (unlikely(t1 & (sizeof(double)-1)))
301 goto unaligned_copy;
302
303 /* src and dst have same alignment. */
304
305 /* Copy bytes till we are double-aligned. */
306 t2 = src & (sizeof(double) - 1);
307 if (unlikely(t2 != 0)) {
308 t2 = sizeof(double) - t2;
309 while (t2 && len) {
310 /* *pcd++ = *pcs++; */
311 ldbma(s_space, pcs, t3, pmc_load_exc);
312 len--;
313 stbma(d_space, t3, pcd, pmc_store_exc);
314 t2--;
315 }
316 }
317
318 pds = (double *)pcs;
319 pdd = (double *)pcd;
320
321#if 0
322 /* Copy 8 doubles at a time */
323 while (len >= 8*sizeof(double)) {
324 register double r1, r2, r3, r4, r5, r6, r7, r8;
325 /* prefetch_src((char *)pds + L1_CACHE_BYTES); */
326 flddma(s_space, pds, r1, pmc_load_exc);
327 flddma(s_space, pds, r2, pmc_load_exc);
328 flddma(s_space, pds, r3, pmc_load_exc);
329 flddma(s_space, pds, r4, pmc_load_exc);
330 fstdma(d_space, r1, pdd, pmc_store_exc);
331 fstdma(d_space, r2, pdd, pmc_store_exc);
332 fstdma(d_space, r3, pdd, pmc_store_exc);
333 fstdma(d_space, r4, pdd, pmc_store_exc);
334
335#if 0
336 if (L1_CACHE_BYTES <= 32)
337 prefetch_src((char *)pds + L1_CACHE_BYTES);
338#endif
339 flddma(s_space, pds, r5, pmc_load_exc);
340 flddma(s_space, pds, r6, pmc_load_exc);
341 flddma(s_space, pds, r7, pmc_load_exc);
342 flddma(s_space, pds, r8, pmc_load_exc);
343 fstdma(d_space, r5, pdd, pmc_store_exc);
344 fstdma(d_space, r6, pdd, pmc_store_exc);
345 fstdma(d_space, r7, pdd, pmc_store_exc);
346 fstdma(d_space, r8, pdd, pmc_store_exc);
347 len -= 8*sizeof(double);
348 }
349#endif
350
351 pws = (unsigned int *)pds;
352 pwd = (unsigned int *)pdd;
353
354word_copy:
355 while (len >= 8*sizeof(unsigned int)) {
356 register unsigned int r1,r2,r3,r4,r5,r6,r7,r8;
357 /* prefetch_src((char *)pws + L1_CACHE_BYTES); */
358 ldwma(s_space, pws, r1, pmc_load_exc);
359 ldwma(s_space, pws, r2, pmc_load_exc);
360 ldwma(s_space, pws, r3, pmc_load_exc);
361 ldwma(s_space, pws, r4, pmc_load_exc);
362 stwma(d_space, r1, pwd, pmc_store_exc);
363 stwma(d_space, r2, pwd, pmc_store_exc);
364 stwma(d_space, r3, pwd, pmc_store_exc);
365 stwma(d_space, r4, pwd, pmc_store_exc);
366
367 ldwma(s_space, pws, r5, pmc_load_exc);
368 ldwma(s_space, pws, r6, pmc_load_exc);
369 ldwma(s_space, pws, r7, pmc_load_exc);
370 ldwma(s_space, pws, r8, pmc_load_exc);
371 stwma(d_space, r5, pwd, pmc_store_exc);
372 stwma(d_space, r6, pwd, pmc_store_exc);
373 stwma(d_space, r7, pwd, pmc_store_exc);
374 stwma(d_space, r8, pwd, pmc_store_exc);
375 len -= 8*sizeof(unsigned int);
376 }
377
378 while (len >= 4*sizeof(unsigned int)) {
379 register unsigned int r1,r2,r3,r4;
380 ldwma(s_space, pws, r1, pmc_load_exc);
381 ldwma(s_space, pws, r2, pmc_load_exc);
382 ldwma(s_space, pws, r3, pmc_load_exc);
383 ldwma(s_space, pws, r4, pmc_load_exc);
384 stwma(d_space, r1, pwd, pmc_store_exc);
385 stwma(d_space, r2, pwd, pmc_store_exc);
386 stwma(d_space, r3, pwd, pmc_store_exc);
387 stwma(d_space, r4, pwd, pmc_store_exc);
388 len -= 4*sizeof(unsigned int);
389 }
390
391 pcs = (unsigned char *)pws;
392 pcd = (unsigned char *)pwd;
393
394byte_copy:
395 while (len) {
396 /* *pcd++ = *pcs++; */
397 ldbma(s_space, pcs, t3, pmc_load_exc);
398 stbma(d_space, t3, pcd, pmc_store_exc);
399 len--;
400 }
401
402 return PA_MEMCPY_OK;
403
404unaligned_copy:
405 /* possibly we are aligned on a word, but not on a double... */
406 if (likely((t1 & (sizeof(unsigned int)-1)) == 0)) {
407 t2 = src & (sizeof(unsigned int) - 1);
408
409 if (unlikely(t2 != 0)) {
410 t2 = sizeof(unsigned int) - t2;
411 while (t2) {
412 /* *pcd++ = *pcs++; */
413 ldbma(s_space, pcs, t3, pmc_load_exc);
414 stbma(d_space, t3, pcd, pmc_store_exc);
415 len--;
416 t2--;
417 }
418 }
419
420 pws = (unsigned int *)pcs;
421 pwd = (unsigned int *)pcd;
422 goto word_copy;
423 }
424
425 /* Align the destination. */
426 if (unlikely((dst & (sizeof(unsigned int) - 1)) != 0)) {
427 t2 = sizeof(unsigned int) - (dst & (sizeof(unsigned int) - 1));
428 while (t2) {
429 /* *pcd++ = *pcs++; */
430 ldbma(s_space, pcs, t3, pmc_load_exc);
431 stbma(d_space, t3, pcd, pmc_store_exc);
432 len--;
433 t2--;
434 }
435 dst = (unsigned long)pcd;
436 src = (unsigned long)pcs;
437 }
438
439 ret = copy_dstaligned(dst, src, len / sizeof(unsigned int));
440 if (ret)
441 return ret;
442
443 pcs += (len & -sizeof(unsigned int));
444 pcd += (len & -sizeof(unsigned int));
445 len %= sizeof(unsigned int);
446
447 preserve_branch(handle_load_error);
448 preserve_branch(handle_store_error);
449
450 goto byte_copy;
451
452handle_load_error:
453 __asm__ __volatile__ ("pmc_load_exc:\n");
454 return PA_MEMCPY_LOAD_ERROR;
455
456handle_store_error:
457 __asm__ __volatile__ ("pmc_store_exc:\n");
458 return PA_MEMCPY_STORE_ERROR;
459}
460
461
462/* Returns 0 for success, otherwise, returns number of bytes not transferred. */ 35/* Returns 0 for success, otherwise, returns number of bytes not transferred. */
463static unsigned long pa_memcpy(void *dstp, const void *srcp, unsigned long len) 36extern unsigned long pa_memcpy(void *dst, const void *src,
464{ 37 unsigned long len);
465 unsigned long ret, fault_addr, reference;
466 struct exception_data *d;
467
468 ret = pa_memcpy_internal(dstp, srcp, len);
469 if (likely(ret == PA_MEMCPY_OK))
470 return 0;
471
472 /* if a load or store fault occured we can get the faulty addr */
473 d = this_cpu_ptr(&exception_data);
474 fault_addr = d->fault_addr;
475
476 /* error in load or store? */
477 if (ret == PA_MEMCPY_LOAD_ERROR)
478 reference = (unsigned long) srcp;
479 else
480 reference = (unsigned long) dstp;
481 38
482 DPRINTF("pa_memcpy: fault type = %lu, len=%lu fault_addr=%lu ref=%lu\n",
483 ret, len, fault_addr, reference);
484
485 if (fault_addr >= reference)
486 return len - (fault_addr - reference);
487 else
488 return len;
489}
490
491#ifdef __KERNEL__
492unsigned long __copy_to_user(void __user *dst, const void *src, 39unsigned long __copy_to_user(void __user *dst, const void *src,
493 unsigned long len) 40 unsigned long len)
494{ 41{
@@ -537,5 +84,3 @@ long probe_kernel_read(void *dst, const void *src, size_t size)
537 84
538 return __probe_kernel_read(dst, src, size); 85 return __probe_kernel_read(dst, src, size);
539} 86}
540
541#endif
diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
index deab89a8915a..32ec22146141 100644
--- a/arch/parisc/mm/fault.c
+++ b/arch/parisc/mm/fault.c
@@ -150,6 +150,23 @@ int fixup_exception(struct pt_regs *regs)
150 d->fault_space = regs->isr; 150 d->fault_space = regs->isr;
151 d->fault_addr = regs->ior; 151 d->fault_addr = regs->ior;
152 152
153 /*
154 * Fix up get_user() and put_user().
155 * ASM_EXCEPTIONTABLE_ENTRY_EFAULT() sets the least-significant
156 * bit in the relative address of the fixup routine to indicate
157 * that %r8 should be loaded with -EFAULT to report a userspace
158 * access error.
159 */
160 if (fix->fixup & 1) {
161 regs->gr[8] = -EFAULT;
162
163 /* zero target register for get_user() */
164 if (parisc_acctyp(0, regs->iir) == VM_READ) {
165 int treg = regs->iir & 0x1f;
166 regs->gr[treg] = 0;
167 }
168 }
169
153 regs->iaoq[0] = (unsigned long)&fix->fixup + fix->fixup; 170 regs->iaoq[0] = (unsigned long)&fix->fixup + fix->fixup;
154 regs->iaoq[0] &= ~3; 171 regs->iaoq[0] &= ~3;
155 /* 172 /*
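
For C callers nothing changes observably; the fixup simply happens inside the trap handler now. Ordinary uaccess semantics, shown for illustration (not code from this patch):

    int val;
    long err = get_user(val, uptr);         /* uptr: some int __user * */

    /* On a faulting uptr: err == -EFAULT and val == 0, because the
     * handler loads %r8 (the error register) and zeroes the target
     * register of the faulting load.  On success: err == 0. */
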
diff --git a/arch/powerpc/crypto/crc32c-vpmsum_glue.c b/arch/powerpc/crypto/crc32c-vpmsum_glue.c
index 411994551afc..f058e0c3e4d4 100644
--- a/arch/powerpc/crypto/crc32c-vpmsum_glue.c
+++ b/arch/powerpc/crypto/crc32c-vpmsum_glue.c
@@ -33,10 +33,13 @@ static u32 crc32c_vpmsum(u32 crc, unsigned char const *p, size_t len)
33 } 33 }
34 34
35 if (len & ~VMX_ALIGN_MASK) { 35 if (len & ~VMX_ALIGN_MASK) {
36 preempt_disable();
36 pagefault_disable(); 37 pagefault_disable();
37 enable_kernel_altivec(); 38 enable_kernel_altivec();
38 crc = __crc32c_vpmsum(crc, p, len & ~VMX_ALIGN_MASK); 39 crc = __crc32c_vpmsum(crc, p, len & ~VMX_ALIGN_MASK);
40 disable_kernel_altivec();
39 pagefault_enable(); 41 pagefault_enable();
42 preempt_enable();
40 } 43 }
41 44
42 tail = len & VMX_ALIGN_MASK; 45 tail = len & VMX_ALIGN_MASK;
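
enable_kernel_altivec() hands the current CPU's vector unit to kernel code, so the task must not be preempted (and possibly migrated) while VMX state is live; pagefault_disable() alone does not guarantee that on preemptible kernels. The bracket this hunk establishes, shown flat for clarity:

    preempt_disable();
    pagefault_disable();
    enable_kernel_altivec();
    crc = __crc32c_vpmsum(crc, p, len & ~VMX_ALIGN_MASK);
    disable_kernel_altivec();
    pagefault_enable();
    preempt_enable();
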
diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c
index cbc7c42cdb74..ec7a8b099dd9 100644
--- a/arch/powerpc/kernel/align.c
+++ b/arch/powerpc/kernel/align.c
@@ -807,14 +807,25 @@ int fix_alignment(struct pt_regs *regs)
807 nb = aligninfo[instr].len; 807 nb = aligninfo[instr].len;
808 flags = aligninfo[instr].flags; 808 flags = aligninfo[instr].flags;
809 809
810 /* ldbrx/stdbrx overlap lfs/stfs in the DSISR unfortunately */ 810 /*
811 if (IS_XFORM(instruction) && ((instruction >> 1) & 0x3ff) == 532) { 811 * Handle some cases which give overlaps in the DSISR values.
812 nb = 8; 812 */
813 flags = LD+SW; 813 if (IS_XFORM(instruction)) {
814 } else if (IS_XFORM(instruction) && 814 switch (get_xop(instruction)) {
815 ((instruction >> 1) & 0x3ff) == 660) { 815 case 532: /* ldbrx */
816 nb = 8; 816 nb = 8;
817 flags = ST+SW; 817 flags = LD+SW;
818 break;
819 case 660: /* stdbrx */
820 nb = 8;
821 flags = ST+SW;
822 break;
823 case 20: /* lwarx */
824 case 84: /* ldarx */
825 case 116: /* lharx */
826 case 276: /* lqarx */
827 return 0; /* not emulated ever */
828 }
818 } 829 }
819 830
820 /* Byteswap little endian loads and stores */ 831 /* Byteswap little endian loads and stores */
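
get_xop() isolates the X-form extended-opcode field; judging from the open-coded tests it replaces, it must be equivalent to the following (a reconstruction, not quoted from a header):

    /* X-form: primary opcode in the top 6 bits, extended opcode in
     * bits 1-10, Rc in bit 0 -- shift off Rc, mask ten bits. */
    #define get_xop(inst)   (((inst) >> 1) & 0x3ff)
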
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index ae179cb1bb3c..c119044cad0d 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -67,7 +67,7 @@ PPC64_CACHES:
67 * flush all bytes from start through stop-1 inclusive 67 * flush all bytes from start through stop-1 inclusive
68 */ 68 */
69 69
70_GLOBAL(flush_icache_range) 70_GLOBAL_TOC(flush_icache_range)
71BEGIN_FTR_SECTION 71BEGIN_FTR_SECTION
72 PURGE_PREFETCHED_INS 72 PURGE_PREFETCHED_INS
73 blr 73 blr
@@ -120,7 +120,7 @@ EXPORT_SYMBOL(flush_icache_range)
120 * 120 *
121 * flush all bytes from start to stop-1 inclusive 121 * flush all bytes from start to stop-1 inclusive
122 */ 122 */
123_GLOBAL(flush_dcache_range) 123_GLOBAL_TOC(flush_dcache_range)
124 124
125/* 125/*
126 * Flush the data cache to memory 126 * Flush the data cache to memory
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 9cfaa8b69b5f..f997154dfc41 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -236,6 +236,15 @@ static void cpu_ready_for_interrupts(void)
236 mtspr(SPRN_LPCR, lpcr | LPCR_AIL_3); 236 mtspr(SPRN_LPCR, lpcr | LPCR_AIL_3);
237 } 237 }
238 238
239 /*
240 * Fix up HFSCR:TM based on CPU features. The bit is set by our
241 * early asm init because at that point we haven't updated our
242 * CPU features from firmware and device-tree. Here we have,
243 * so let's do it.
244 */
245 if (cpu_has_feature(CPU_FTR_HVMODE) && !cpu_has_feature(CPU_FTR_TM_COMP))
246 mtspr(SPRN_HFSCR, mfspr(SPRN_HFSCR) & ~HFSCR_TM);
247
239 /* Set IR and DR in PACA MSR */ 248 /* Set IR and DR in PACA MSR */
240 get_paca()->kernel_msr = MSR_KERNEL; 249 get_paca()->kernel_msr = MSR_KERNEL;
241} 250}
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 8c68145ba1bd..710e491206ed 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -1487,6 +1487,10 @@ long kvm_vm_ioctl_resize_hpt_prepare(struct kvm *kvm,
1487 /* start new resize */ 1487 /* start new resize */
1488 1488
1489 resize = kzalloc(sizeof(*resize), GFP_KERNEL); 1489 resize = kzalloc(sizeof(*resize), GFP_KERNEL);
1490 if (!resize) {
1491 ret = -ENOMEM;
1492 goto out;
1493 }
1490 resize->order = shift; 1494 resize->order = shift;
1491 resize->kvm = kvm; 1495 resize->kvm = kvm;
1492 INIT_WORK(&resize->work, resize_hpt_prepare_work); 1496 INIT_WORK(&resize->work, resize_hpt_prepare_work);
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index cc332608e656..65bb8f33b399 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -638,6 +638,10 @@ static void native_flush_hash_range(unsigned long number, int local)
638 unsigned long psize = batch->psize; 638 unsigned long psize = batch->psize;
639 int ssize = batch->ssize; 639 int ssize = batch->ssize;
640 int i; 640 int i;
641 unsigned int use_local;
642
643 use_local = local && mmu_has_feature(MMU_FTR_TLBIEL) &&
644 mmu_psize_defs[psize].tlbiel && !cxl_ctx_in_use();
641 645
642 local_irq_save(flags); 646 local_irq_save(flags);
643 647
@@ -667,8 +671,7 @@ static void native_flush_hash_range(unsigned long number, int local)
667 } pte_iterate_hashed_end(); 671 } pte_iterate_hashed_end();
668 } 672 }
669 673
670 if (mmu_has_feature(MMU_FTR_TLBIEL) && 674 if (use_local) {
671 mmu_psize_defs[psize].tlbiel && local) {
672 asm volatile("ptesync":::"memory"); 675 asm volatile("ptesync":::"memory");
673 for (i = 0; i < number; i++) { 676 for (i = 0; i < number; i++) {
674 vpn = batch->vpn[i]; 677 vpn = batch->vpn[i];
diff --git a/arch/s390/boot/compressed/misc.c b/arch/s390/boot/compressed/misc.c
index fa95041fa9f6..33ca29333e18 100644
--- a/arch/s390/boot/compressed/misc.c
+++ b/arch/s390/boot/compressed/misc.c
@@ -141,31 +141,34 @@ static void check_ipl_parmblock(void *start, unsigned long size)
141 141
142unsigned long decompress_kernel(void) 142unsigned long decompress_kernel(void)
143{ 143{
144 unsigned long output_addr; 144 void *output, *kernel_end;
145 unsigned char *output;
146 145
147 output_addr = ((unsigned long) &_end + HEAP_SIZE + 4095UL) & -4096UL; 146 output = (void *) ALIGN((unsigned long) &_end + HEAP_SIZE, PAGE_SIZE);
148 check_ipl_parmblock((void *) 0, output_addr + SZ__bss_start); 147 kernel_end = output + SZ__bss_start;
149 memset(&_bss, 0, &_ebss - &_bss); 148 check_ipl_parmblock((void *) 0, (unsigned long) kernel_end);
150 free_mem_ptr = (unsigned long)&_end;
151 free_mem_end_ptr = free_mem_ptr + HEAP_SIZE;
152 output = (unsigned char *) output_addr;
153 149
154#ifdef CONFIG_BLK_DEV_INITRD 150#ifdef CONFIG_BLK_DEV_INITRD
155 /* 151 /*
156 * Move the initrd right behind the end of the decompressed 152 * Move the initrd right behind the end of the decompressed
157 * kernel image. 153 * kernel image. This also prevents initrd corruption caused by
154 * bss clearing since kernel_end will always be located behind the
155 * current bss section.
158 */ 156 */
159 if (INITRD_START && INITRD_SIZE && 157 if (INITRD_START && INITRD_SIZE && kernel_end > (void *) INITRD_START) {
160 INITRD_START < (unsigned long) output + SZ__bss_start) { 158 check_ipl_parmblock(kernel_end, INITRD_SIZE);
161 check_ipl_parmblock(output + SZ__bss_start, 159 memmove(kernel_end, (void *) INITRD_START, INITRD_SIZE);
162 INITRD_START + INITRD_SIZE); 160 INITRD_START = (unsigned long) kernel_end;
163 memmove(output + SZ__bss_start,
164 (void *) INITRD_START, INITRD_SIZE);
165 INITRD_START = (unsigned long) output + SZ__bss_start;
166 } 161 }
167#endif 162#endif
168 163
164 /*
165 * Clear the bss section. free_mem_ptr and free_mem_end_ptr need to be
166 * initialized afterwards since they reside in the bss.
167 */
168 memset(&_bss, 0, &_ebss - &_bss);
169 free_mem_ptr = (unsigned long) &_end;
170 free_mem_end_ptr = free_mem_ptr + HEAP_SIZE;
171
169 puts("Uncompressing Linux... "); 172 puts("Uncompressing Linux... ");
170 __decompress(input_data, input_len, NULL, NULL, output, 0, NULL, error); 173 __decompress(input_data, input_len, NULL, NULL, output, 0, NULL, error);
171 puts("Ok, booting the kernel.\n"); 174 puts("Ok, booting the kernel.\n");
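
The ALIGN() form is just a clearer spelling of the old open-coded rounding; with the usual kernel definition the two are bit-for-bit identical for PAGE_SIZE == 4096:

    #define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((a) - 1))  /* kernel ALIGN() shape */

    /* ALIGN_UP(x, 4096) == (x + 4095UL) & ~4095UL
     *                   == (x + 4095UL) & -4096UL   (two's complement) */
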
diff --git a/arch/s390/include/asm/sections.h b/arch/s390/include/asm/sections.h
index 5ce29fe100ba..fbd9116eb17b 100644
--- a/arch/s390/include/asm/sections.h
+++ b/arch/s390/include/asm/sections.h
@@ -4,6 +4,5 @@
4#include <asm-generic/sections.h> 4#include <asm-generic/sections.h>
5 5
6extern char _eshared[], _ehead[]; 6extern char _eshared[], _ehead[];
7extern char __start_ro_after_init[], __end_ro_after_init[];
8 7
9#endif 8#endif
diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
index 136932ff4250..3ea1554d04b3 100644
--- a/arch/s390/include/asm/uaccess.h
+++ b/arch/s390/include/asm/uaccess.h
@@ -147,7 +147,7 @@ unsigned long __must_check __copy_to_user(void __user *to, const void *from,
147 " jg 2b\n" \ 147 " jg 2b\n" \
148 ".popsection\n" \ 148 ".popsection\n" \
149 EX_TABLE(0b,3b) EX_TABLE(1b,3b) \ 149 EX_TABLE(0b,3b) EX_TABLE(1b,3b) \
150 : "=d" (__rc), "=Q" (*(to)) \ 150 : "=d" (__rc), "+Q" (*(to)) \
151 : "d" (size), "Q" (*(from)), \ 151 : "d" (size), "Q" (*(from)), \
152 "d" (__reg0), "K" (-EFAULT) \ 152 "d" (__reg0), "K" (-EFAULT) \
153 : "cc"); \ 153 : "cc"); \
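
The constraint change matters because "=Q" declares *(to) write-only, allowing gcc to treat its previous contents as dead even though the copy can fault partway and leave the buffer only partially written; "+Q" (read-write) describes that honestly. The same distinction in miniature, with plain register constraints:

    int v = 1;

    asm("" : "=r"(v));      /* "=": output only -- the compiler may hand
                             * the asm an uninitialized register, so v is
                             * now unspecified                            */
    v = 1;
    asm("" : "+r"(v));      /* "+": input and output -- v arrives as 1
                             * and stays 1 unless the asm overwrites it  */
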
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 47a973b5b4f1..5dab859b0d54 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -909,13 +909,11 @@ void __init smp_prepare_boot_cpu(void)
909{ 909{
910 struct pcpu *pcpu = pcpu_devices; 910 struct pcpu *pcpu = pcpu_devices;
911 911
912 WARN_ON(!cpu_present(0) || !cpu_online(0));
912 pcpu->state = CPU_STATE_CONFIGURED; 913 pcpu->state = CPU_STATE_CONFIGURED;
913 pcpu->address = stap();
914 pcpu->lowcore = (struct lowcore *)(unsigned long) store_prefix(); 914 pcpu->lowcore = (struct lowcore *)(unsigned long) store_prefix();
915 S390_lowcore.percpu_offset = __per_cpu_offset[0]; 915 S390_lowcore.percpu_offset = __per_cpu_offset[0];
916 smp_cpu_set_polarization(0, POLARIZATION_UNKNOWN); 916 smp_cpu_set_polarization(0, POLARIZATION_UNKNOWN);
917 set_cpu_present(0, true);
918 set_cpu_online(0, true);
919} 917}
920 918
921void __init smp_cpus_done(unsigned int max_cpus) 919void __init smp_cpus_done(unsigned int max_cpus)
@@ -924,6 +922,7 @@ void __init smp_cpus_done(unsigned int max_cpus)
924 922
925void __init smp_setup_processor_id(void) 923void __init smp_setup_processor_id(void)
926{ 924{
925 pcpu_devices[0].address = stap();
927 S390_lowcore.cpu_nr = 0; 926 S390_lowcore.cpu_nr = 0;
928 S390_lowcore.spinlock_lockval = arch_spin_lockval(0); 927 S390_lowcore.spinlock_lockval = arch_spin_lockval(0);
929} 928}
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index 5ccf95396251..72307f108c40 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -63,11 +63,9 @@ SECTIONS
63 63
64 . = ALIGN(PAGE_SIZE); 64 . = ALIGN(PAGE_SIZE);
65 __start_ro_after_init = .; 65 __start_ro_after_init = .;
66 __start_data_ro_after_init = .;
67 .data..ro_after_init : { 66 .data..ro_after_init : {
68 *(.data..ro_after_init) 67 *(.data..ro_after_init)
69 } 68 }
70 __end_data_ro_after_init = .;
71 EXCEPTION_TABLE(16) 69 EXCEPTION_TABLE(16)
72 . = ALIGN(PAGE_SIZE); 70 . = ALIGN(PAGE_SIZE);
73 __end_ro_after_init = .; 71 __end_ro_after_init = .;
diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c
index d55c829a5944..ddbffb715b40 100644
--- a/arch/s390/kvm/gaccess.c
+++ b/arch/s390/kvm/gaccess.c
@@ -168,8 +168,7 @@ union page_table_entry {
168 unsigned long z : 1; /* Zero Bit */ 168 unsigned long z : 1; /* Zero Bit */
169 unsigned long i : 1; /* Page-Invalid Bit */ 169 unsigned long i : 1; /* Page-Invalid Bit */
170 unsigned long p : 1; /* DAT-Protection Bit */ 170 unsigned long p : 1; /* DAT-Protection Bit */
171 unsigned long co : 1; /* Change-Recording Override */ 171 unsigned long : 9;
172 unsigned long : 8;
173 }; 172 };
174}; 173};
175 174
@@ -745,8 +744,6 @@ static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva,
745 return PGM_PAGE_TRANSLATION; 744 return PGM_PAGE_TRANSLATION;
746 if (pte.z) 745 if (pte.z)
747 return PGM_TRANSLATION_SPEC; 746 return PGM_TRANSLATION_SPEC;
748 if (pte.co && !edat1)
749 return PGM_TRANSLATION_SPEC;
750 dat_protection |= pte.p; 747 dat_protection |= pte.p;
751 raddr.pfra = pte.pfra; 748 raddr.pfra = pte.pfra;
752real_address: 749real_address:
@@ -1182,7 +1179,7 @@ int kvm_s390_shadow_fault(struct kvm_vcpu *vcpu, struct gmap *sg,
1182 rc = gmap_read_table(sg->parent, pgt + vaddr.px * 8, &pte.val); 1179 rc = gmap_read_table(sg->parent, pgt + vaddr.px * 8, &pte.val);
1183 if (!rc && pte.i) 1180 if (!rc && pte.i)
1184 rc = PGM_PAGE_TRANSLATION; 1181 rc = PGM_PAGE_TRANSLATION;
1185 if (!rc && (pte.z || (pte.co && sg->edat_level < 1))) 1182 if (!rc && pte.z)
1186 rc = PGM_TRANSLATION_SPEC; 1183 rc = PGM_TRANSLATION_SPEC;
1187shadow_page: 1184shadow_page:
1188 pte.p |= dat_protection; 1185 pte.p |= dat_protection;
diff --git a/arch/sparc/include/asm/page_64.h b/arch/sparc/include/asm/page_64.h
index f294dd42fc7d..5961b2d8398a 100644
--- a/arch/sparc/include/asm/page_64.h
+++ b/arch/sparc/include/asm/page_64.h
@@ -17,6 +17,7 @@
17 17
18#define HPAGE_SHIFT 23 18#define HPAGE_SHIFT 23
19#define REAL_HPAGE_SHIFT 22 19#define REAL_HPAGE_SHIFT 22
20#define HPAGE_2GB_SHIFT 31
20#define HPAGE_256MB_SHIFT 28 21#define HPAGE_256MB_SHIFT 28
21#define HPAGE_64K_SHIFT 16 22#define HPAGE_64K_SHIFT 16
22#define REAL_HPAGE_SIZE (_AC(1,UL) << REAL_HPAGE_SHIFT) 23#define REAL_HPAGE_SIZE (_AC(1,UL) << REAL_HPAGE_SHIFT)
@@ -27,7 +28,7 @@
27#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) 28#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
28#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA 29#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
29#define REAL_HPAGE_PER_HPAGE (_AC(1,UL) << (HPAGE_SHIFT - REAL_HPAGE_SHIFT)) 30#define REAL_HPAGE_PER_HPAGE (_AC(1,UL) << (HPAGE_SHIFT - REAL_HPAGE_SHIFT))
30#define HUGE_MAX_HSTATE 3 31#define HUGE_MAX_HSTATE 4
31#endif 32#endif
32 33
33#ifndef __ASSEMBLY__ 34#ifndef __ASSEMBLY__
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index 8a598528ec1f..6fbd931f0570 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -679,26 +679,27 @@ static inline unsigned long pmd_pfn(pmd_t pmd)
679 return pte_pfn(pte); 679 return pte_pfn(pte);
680} 680}
681 681
682#ifdef CONFIG_TRANSPARENT_HUGEPAGE 682#define __HAVE_ARCH_PMD_WRITE
683static inline unsigned long pmd_dirty(pmd_t pmd) 683static inline unsigned long pmd_write(pmd_t pmd)
684{ 684{
685 pte_t pte = __pte(pmd_val(pmd)); 685 pte_t pte = __pte(pmd_val(pmd));
686 686
687 return pte_dirty(pte); 687 return pte_write(pte);
688} 688}
689 689
690static inline unsigned long pmd_young(pmd_t pmd) 690#ifdef CONFIG_TRANSPARENT_HUGEPAGE
691static inline unsigned long pmd_dirty(pmd_t pmd)
691{ 692{
692 pte_t pte = __pte(pmd_val(pmd)); 693 pte_t pte = __pte(pmd_val(pmd));
693 694
694 return pte_young(pte); 695 return pte_dirty(pte);
695} 696}
696 697
697static inline unsigned long pmd_write(pmd_t pmd) 698static inline unsigned long pmd_young(pmd_t pmd)
698{ 699{
699 pte_t pte = __pte(pmd_val(pmd)); 700 pte_t pte = __pte(pmd_val(pmd));
700 701
701 return pte_write(pte); 702 return pte_young(pte);
702} 703}
703 704
704static inline unsigned long pmd_trans_huge(pmd_t pmd) 705static inline unsigned long pmd_trans_huge(pmd_t pmd)
diff --git a/arch/sparc/include/asm/processor_32.h b/arch/sparc/include/asm/processor_32.h
index 365d4cb267b4..dd27159819eb 100644
--- a/arch/sparc/include/asm/processor_32.h
+++ b/arch/sparc/include/asm/processor_32.h
@@ -18,12 +18,6 @@
18#include <asm/signal.h> 18#include <asm/signal.h>
19#include <asm/page.h> 19#include <asm/page.h>
20 20
21/*
22 * The sparc has no problems with write protection
23 */
24#define wp_works_ok 1
25#define wp_works_ok__is_a_macro /* for versions in ksyms.c */
26
27/* Whee, this is STACK_TOP + PAGE_SIZE and the lowest kernel address too... 21/* Whee, this is STACK_TOP + PAGE_SIZE and the lowest kernel address too...
28 * That one page is used to protect kernel from intruders, so that 22 * That one page is used to protect kernel from intruders, so that
29 * we can make our access_ok test faster 23 * we can make our access_ok test faster
diff --git a/arch/sparc/include/asm/processor_64.h b/arch/sparc/include/asm/processor_64.h
index 6448cfc8292f..b58ee9018433 100644
--- a/arch/sparc/include/asm/processor_64.h
+++ b/arch/sparc/include/asm/processor_64.h
@@ -18,10 +18,6 @@
18#include <asm/ptrace.h> 18#include <asm/ptrace.h>
19#include <asm/page.h> 19#include <asm/page.h>
20 20
21/* The sparc has no problems with write protection */
22#define wp_works_ok 1
23#define wp_works_ok__is_a_macro /* for versions in ksyms.c */
24
25/* 21/*
26 * User lives in his very own context, and cannot reference us. Note 22 * User lives in his very own context, and cannot reference us. Note
27 * that TASK_SIZE is a misnomer, it really gives maximum user virtual 23 * that TASK_SIZE is a misnomer, it really gives maximum user virtual
diff --git a/arch/sparc/kernel/head_64.S b/arch/sparc/kernel/head_64.S
index 6aa3da152c20..44101196d02b 100644
--- a/arch/sparc/kernel/head_64.S
+++ b/arch/sparc/kernel/head_64.S
@@ -96,6 +96,7 @@ sparc64_boot:
96 andn %g1, PSTATE_AM, %g1 96 andn %g1, PSTATE_AM, %g1
97 wrpr %g1, 0x0, %pstate 97 wrpr %g1, 0x0, %pstate
98 ba,a,pt %xcc, 1f 98 ba,a,pt %xcc, 1f
99 nop
99 100
100 .globl prom_finddev_name, prom_chosen_path, prom_root_node 101 .globl prom_finddev_name, prom_chosen_path, prom_root_node
101 .globl prom_getprop_name, prom_mmu_name, prom_peer_name 102 .globl prom_getprop_name, prom_mmu_name, prom_peer_name
@@ -613,6 +614,7 @@ niagara_tlb_fixup:
613 nop 614 nop
614 615
615 ba,a,pt %xcc, 80f 616 ba,a,pt %xcc, 80f
617 nop
616niagara4_patch: 618niagara4_patch:
617 call niagara4_patch_copyops 619 call niagara4_patch_copyops
618 nop 620 nop
@@ -622,6 +624,7 @@ niagara4_patch:
622 nop 624 nop
623 625
624 ba,a,pt %xcc, 80f 626 ba,a,pt %xcc, 80f
627 nop
625 628
626niagara2_patch: 629niagara2_patch:
627 call niagara2_patch_copyops 630 call niagara2_patch_copyops
@@ -632,6 +635,7 @@ niagara2_patch:
632 nop 635 nop
633 636
634 ba,a,pt %xcc, 80f 637 ba,a,pt %xcc, 80f
638 nop
635 639
636niagara_patch: 640niagara_patch:
637 call niagara_patch_copyops 641 call niagara_patch_copyops
diff --git a/arch/sparc/kernel/misctrap.S b/arch/sparc/kernel/misctrap.S
index 34b4933900bf..9276d2f0dd86 100644
--- a/arch/sparc/kernel/misctrap.S
+++ b/arch/sparc/kernel/misctrap.S
@@ -82,6 +82,7 @@ do_stdfmna:
82 call handle_stdfmna 82 call handle_stdfmna
83 add %sp, PTREGS_OFF, %o0 83 add %sp, PTREGS_OFF, %o0
84 ba,a,pt %xcc, rtrap 84 ba,a,pt %xcc, rtrap
85 nop
85 .size do_stdfmna,.-do_stdfmna 86 .size do_stdfmna,.-do_stdfmna
86 87
87 .type breakpoint_trap,#function 88 .type breakpoint_trap,#function
diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
index df9e731a76f5..fc5124ccdb53 100644
--- a/arch/sparc/kernel/ptrace_64.c
+++ b/arch/sparc/kernel/ptrace_64.c
@@ -351,7 +351,7 @@ static int genregs64_set(struct task_struct *target,
351 } 351 }
352 352
353 if (!ret) { 353 if (!ret) {
354 unsigned long y; 354 unsigned long y = regs->y;
355 355
356 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, 356 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
357 &y, 357 &y,
diff --git a/arch/sparc/kernel/rtrap_64.S b/arch/sparc/kernel/rtrap_64.S
index 216948ca4382..709a82ebd294 100644
--- a/arch/sparc/kernel/rtrap_64.S
+++ b/arch/sparc/kernel/rtrap_64.S
@@ -237,6 +237,7 @@ rt_continue: ldx [%sp + PTREGS_OFF + PT_V9_G1], %g1
237 bne,pt %xcc, user_rtt_fill_32bit 237 bne,pt %xcc, user_rtt_fill_32bit
238 wrpr %g1, %cwp 238 wrpr %g1, %cwp
239 ba,a,pt %xcc, user_rtt_fill_64bit 239 ba,a,pt %xcc, user_rtt_fill_64bit
240 nop
240 241
241user_rtt_fill_fixup_dax: 242user_rtt_fill_fixup_dax:
242 ba,pt %xcc, user_rtt_fill_fixup_common 243 ba,pt %xcc, user_rtt_fill_fixup_common
diff --git a/arch/sparc/kernel/spiterrs.S b/arch/sparc/kernel/spiterrs.S
index 4a73009f66a5..d7e540842809 100644
--- a/arch/sparc/kernel/spiterrs.S
+++ b/arch/sparc/kernel/spiterrs.S
@@ -86,6 +86,7 @@ __spitfire_cee_trap_continue:
86 rd %pc, %g7 86 rd %pc, %g7
87 87
88 ba,a,pt %xcc, 2f 88 ba,a,pt %xcc, 2f
89 nop
89 90
901: ba,pt %xcc, etrap_irq 911: ba,pt %xcc, etrap_irq
91 rd %pc, %g7 92 rd %pc, %g7
diff --git a/arch/sparc/kernel/sun4v_tlb_miss.S b/arch/sparc/kernel/sun4v_tlb_miss.S
index 6179e19bc9b9..c19f352f46c7 100644
--- a/arch/sparc/kernel/sun4v_tlb_miss.S
+++ b/arch/sparc/kernel/sun4v_tlb_miss.S
@@ -352,6 +352,7 @@ sun4v_mna:
352 call sun4v_do_mna 352 call sun4v_do_mna
353 add %sp, PTREGS_OFF, %o0 353 add %sp, PTREGS_OFF, %o0
354 ba,a,pt %xcc, rtrap 354 ba,a,pt %xcc, rtrap
355 nop
355 356
356 /* Privileged Action. */ 357 /* Privileged Action. */
357sun4v_privact: 358sun4v_privact:
diff --git a/arch/sparc/kernel/urtt_fill.S b/arch/sparc/kernel/urtt_fill.S
index 5604a2b051d4..364af3250646 100644
--- a/arch/sparc/kernel/urtt_fill.S
+++ b/arch/sparc/kernel/urtt_fill.S
@@ -92,6 +92,7 @@ user_rtt_fill_fixup_common:
92 call sun4v_data_access_exception 92 call sun4v_data_access_exception
93 nop 93 nop
94 ba,a,pt %xcc, rtrap 94 ba,a,pt %xcc, rtrap
95 nop
95 96
961: call spitfire_data_access_exception 971: call spitfire_data_access_exception
97 nop 98 nop
diff --git a/arch/sparc/kernel/winfixup.S b/arch/sparc/kernel/winfixup.S
index 855019a8590e..1ee173cc3c39 100644
--- a/arch/sparc/kernel/winfixup.S
+++ b/arch/sparc/kernel/winfixup.S
@@ -152,6 +152,8 @@ fill_fixup_dax:
152 call sun4v_data_access_exception 152 call sun4v_data_access_exception
153 nop 153 nop
154 ba,a,pt %xcc, rtrap 154 ba,a,pt %xcc, rtrap
155 nop
1551: call spitfire_data_access_exception 1561: call spitfire_data_access_exception
156 nop 157 nop
157 ba,a,pt %xcc, rtrap 158 ba,a,pt %xcc, rtrap
159 nop
diff --git a/arch/sparc/lib/NG2memcpy.S b/arch/sparc/lib/NG2memcpy.S
index c629dbd121b6..64dcd6cdb606 100644
--- a/arch/sparc/lib/NG2memcpy.S
+++ b/arch/sparc/lib/NG2memcpy.S
@@ -326,11 +326,13 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
326 blu 170f 326 blu 170f
327 nop 327 nop
328 ba,a,pt %xcc, 180f 328 ba,a,pt %xcc, 180f
329 nop
329 330
3304: /* 32 <= low bits < 48 */ 3314: /* 32 <= low bits < 48 */
331 blu 150f 332 blu 150f
332 nop 333 nop
333 ba,a,pt %xcc, 160f 334 ba,a,pt %xcc, 160f
335 nop
3345: /* 0 < low bits < 32 */ 3365: /* 0 < low bits < 32 */
335 blu,a 6f 337 blu,a 6f
336 cmp %g2, 8 338 cmp %g2, 8
@@ -338,6 +340,7 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
338 blu 130f 340 blu 130f
339 nop 341 nop
340 ba,a,pt %xcc, 140f 342 ba,a,pt %xcc, 140f
343 nop
3416: /* 0 < low bits < 16 */ 3446: /* 0 < low bits < 16 */
342 bgeu 120f 345 bgeu 120f
343 nop 346 nop
@@ -475,6 +478,7 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
475 brz,pt %o2, 85f 478 brz,pt %o2, 85f
476 sub %o0, %o1, GLOBAL_SPARE 479 sub %o0, %o1, GLOBAL_SPARE
477 ba,a,pt %XCC, 90f 480 ba,a,pt %XCC, 90f
481 nop
478 482
479 .align 64 483 .align 64
48075: /* 16 < len <= 64 */ 48475: /* 16 < len <= 64 */
diff --git a/arch/sparc/lib/NG4memcpy.S b/arch/sparc/lib/NG4memcpy.S
index 75bb93b1437f..78ea962edcbe 100644
--- a/arch/sparc/lib/NG4memcpy.S
+++ b/arch/sparc/lib/NG4memcpy.S
@@ -530,4 +530,5 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
530 bne,pt %icc, 1b 530 bne,pt %icc, 1b
531 EX_ST(STORE(stb, %g1, %o0 - 0x01), NG4_retl_o2_plus_1) 531 EX_ST(STORE(stb, %g1, %o0 - 0x01), NG4_retl_o2_plus_1)
532 ba,a,pt %icc, .Lexit 532 ba,a,pt %icc, .Lexit
533 nop
533 .size FUNC_NAME, .-FUNC_NAME 534 .size FUNC_NAME, .-FUNC_NAME
diff --git a/arch/sparc/lib/NG4memset.S b/arch/sparc/lib/NG4memset.S
index 41da4bdd95cb..7c0c81f18837 100644
--- a/arch/sparc/lib/NG4memset.S
+++ b/arch/sparc/lib/NG4memset.S
@@ -102,4 +102,5 @@ NG4bzero:
102 bne,pt %icc, 1b 102 bne,pt %icc, 1b
103 add %o0, 0x30, %o0 103 add %o0, 0x30, %o0
104 ba,a,pt %icc, .Lpostloop 104 ba,a,pt %icc, .Lpostloop
105 nop
105 .size NG4bzero,.-NG4bzero 106 .size NG4bzero,.-NG4bzero
diff --git a/arch/sparc/lib/NGmemcpy.S b/arch/sparc/lib/NGmemcpy.S
index d88c4ed50a00..cd654a719b27 100644
--- a/arch/sparc/lib/NGmemcpy.S
+++ b/arch/sparc/lib/NGmemcpy.S
@@ -394,6 +394,7 @@ FUNC_NAME: /* %i0=dst, %i1=src, %i2=len */
394 brz,pt %i2, 85f 394 brz,pt %i2, 85f
395 sub %o0, %i1, %i3 395 sub %o0, %i1, %i3
396 ba,a,pt %XCC, 90f 396 ba,a,pt %XCC, 90f
397 nop
397 398
398 .align 64 399 .align 64
39970: /* 16 < len <= 64 */ 40070: /* 16 < len <= 64 */
diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
index 323bc6b6e3ad..ee5273ad918d 100644
--- a/arch/sparc/mm/hugetlbpage.c
+++ b/arch/sparc/mm/hugetlbpage.c
@@ -143,6 +143,10 @@ static pte_t sun4v_hugepage_shift_to_tte(pte_t entry, unsigned int shift)
143 pte_val(entry) = pte_val(entry) & ~_PAGE_SZALL_4V; 143 pte_val(entry) = pte_val(entry) & ~_PAGE_SZALL_4V;
144 144
145 switch (shift) { 145 switch (shift) {
146 case HPAGE_2GB_SHIFT:
147 hugepage_size = _PAGE_SZ2GB_4V;
148 pte_val(entry) |= _PAGE_PMD_HUGE;
149 break;
146 case HPAGE_256MB_SHIFT: 150 case HPAGE_256MB_SHIFT:
147 hugepage_size = _PAGE_SZ256MB_4V; 151 hugepage_size = _PAGE_SZ256MB_4V;
148 pte_val(entry) |= _PAGE_PMD_HUGE; 152 pte_val(entry) |= _PAGE_PMD_HUGE;
@@ -183,6 +187,9 @@ static unsigned int sun4v_huge_tte_to_shift(pte_t entry)
183 unsigned int shift; 187 unsigned int shift;
184 188
185 switch (tte_szbits) { 189 switch (tte_szbits) {
190 case _PAGE_SZ2GB_4V:
191 shift = HPAGE_2GB_SHIFT;
192 break;
186 case _PAGE_SZ256MB_4V: 193 case _PAGE_SZ256MB_4V:
187 shift = HPAGE_256MB_SHIFT; 194 shift = HPAGE_256MB_SHIFT;
188 break; 195 break;
@@ -261,7 +268,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
 	if (!pmd)
 		return NULL;
 
-	if (sz == PMD_SHIFT)
+	if (sz >= PMD_SIZE)
 		pte = (pte_t *)pmd;
 	else
 		pte = pte_alloc_map(mm, pmd, addr);
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index ccd455328989..0cda653ae007 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -337,6 +337,10 @@ static int __init setup_hugepagesz(char *string)
 	hugepage_shift = ilog2(hugepage_size);
 
 	switch (hugepage_shift) {
+	case HPAGE_2GB_SHIFT:
+		hv_pgsz_mask = HV_PGSZ_MASK_2GB;
+		hv_pgsz_idx = HV_PGSZ_IDX_2GB;
+		break;
 	case HPAGE_256MB_SHIFT:
 		hv_pgsz_mask = HV_PGSZ_MASK_256MB;
 		hv_pgsz_idx = HV_PGSZ_IDX_256MB;
@@ -1563,7 +1567,7 @@ bool kern_addr_valid(unsigned long addr)
 	if ((long)addr < 0L) {
 		unsigned long pa = __pa(addr);
 
-		if ((addr >> max_phys_bits) != 0UL)
+		if ((pa >> max_phys_bits) != 0UL)
 			return false;
 
 		return pfn_valid(pa >> PAGE_SHIFT);
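
Aside: the one-character kern_addr_valid() fix above hides its importance. A negative (sign-extended) kernel virtual address always has bits set above max_phys_bits, so range-checking addr instead of the translated pa rejects every valid address. A stand-alone C sketch of the difference; the bit width is illustrative (sparc64 derives the real value per CPU type):

#include <stdint.h>
#include <stdio.h>

#define MAX_PHYS_BITS	47	/* illustrative; the kernel computes this per CPU */

/* The fixed check: validate the translated physical address, not the VA. */
static int pa_in_range(uint64_t x)
{
	return (x >> MAX_PHYS_BITS) == 0;
}

int main(void)
{
	uint64_t va = 0xfffff80000001000ULL;	/* high, sign-extended kernel VA */
	uint64_t pa = 0x0000000000001000ULL;	/* its physical translation */

	printf("check on va: %d (always fails)\n", pa_in_range(va));
	printf("check on pa: %d\n", pa_in_range(pa));
	return 0;
}
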
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
index def82f6d626f..8e76ebba2986 100644
--- a/arch/sparc/mm/srmmu.c
+++ b/arch/sparc/mm/srmmu.c
@@ -54,6 +54,7 @@
 enum mbus_module srmmu_modtype;
 static unsigned int hwbug_bitmask;
 int vac_cache_size;
+EXPORT_SYMBOL(vac_cache_size);
 int vac_line_size;
 
 extern struct resource sparc_iomap;
diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c
index afda3bbf7854..ee8066c3d96c 100644
--- a/arch/sparc/mm/tlb.c
+++ b/arch/sparc/mm/tlb.c
@@ -154,7 +154,7 @@ static void tlb_batch_pmd_scan(struct mm_struct *mm, unsigned long vaddr,
 		if (pte_val(*pte) & _PAGE_VALID) {
 			bool exec = pte_exec(*pte);
 
-			tlb_batch_add_one(mm, vaddr, exec, false);
+			tlb_batch_add_one(mm, vaddr, exec, PAGE_SHIFT);
 		}
 		pte++;
 		vaddr += PAGE_SIZE;
@@ -209,9 +209,9 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
 			pte_t orig_pte = __pte(pmd_val(orig));
 			bool exec = pte_exec(orig_pte);
 
-			tlb_batch_add_one(mm, addr, exec, true);
+			tlb_batch_add_one(mm, addr, exec, REAL_HPAGE_SHIFT);
 			tlb_batch_add_one(mm, addr + REAL_HPAGE_SIZE, exec,
-					  true);
+					  REAL_HPAGE_SHIFT);
 		} else {
 			tlb_batch_pmd_scan(mm, addr, orig);
 		}
diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c
index 0a04811f06b7..bedf08b22a47 100644
--- a/arch/sparc/mm/tsb.c
+++ b/arch/sparc/mm/tsb.c
@@ -122,7 +122,7 @@ void flush_tsb_user(struct tlb_batch *tb)
 
 	spin_lock_irqsave(&mm->context.lock, flags);
 
-	if (tb->hugepage_shift < HPAGE_SHIFT) {
+	if (tb->hugepage_shift < REAL_HPAGE_SHIFT) {
 		base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
 		nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
 		if (tlb_type == cheetah_plus || tlb_type == hypervisor)
@@ -155,7 +155,7 @@ void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr,
 
 	spin_lock_irqsave(&mm->context.lock, flags);
 
-	if (hugepage_shift < HPAGE_SHIFT) {
+	if (hugepage_shift < REAL_HPAGE_SHIFT) {
 		base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
 		nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
 		if (tlb_type == cheetah_plus || tlb_type == hypervisor)
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 2d449337a360..a94a4d10f2df 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -120,10 +120,6 @@ else
         # -funit-at-a-time shrinks the kernel .text considerably
         # unfortunately it makes reading oopses harder.
         KBUILD_CFLAGS += $(call cc-option,-funit-at-a-time)
-
-        # this works around some issues with generating unwind tables in older gccs
-        # newer gccs do it by default
-        KBUILD_CFLAGS += $(call cc-option,-maccumulate-outgoing-args)
 endif
 
 ifdef CONFIG_X86_X32
@@ -147,6 +143,37 @@ ifeq ($(CONFIG_KMEMCHECK),y)
         KBUILD_CFLAGS += $(call cc-option,-fno-builtin-memcpy)
 endif
 
+#
+# If the function graph tracer is used with mcount instead of fentry,
+# '-maccumulate-outgoing-args' is needed to prevent a GCC bug
+# (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=42109)
+#
+ifdef CONFIG_FUNCTION_GRAPH_TRACER
+  ifndef CONFIG_HAVE_FENTRY
+	ACCUMULATE_OUTGOING_ARGS := 1
+  else
+    ifeq ($(call cc-option-yn, -mfentry), n)
+	ACCUMULATE_OUTGOING_ARGS := 1
+    endif
+  endif
+endif
+
+#
+# Jump labels need '-maccumulate-outgoing-args' for gcc < 4.5.2 to prevent a
+# GCC bug (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=46226). There's no way
+# to test for this bug at compile-time because the test case needs to execute,
+# which is a no-go for cross compilers. So check the GCC version instead.
+#
+ifdef CONFIG_JUMP_LABEL
+  ifneq ($(ACCUMULATE_OUTGOING_ARGS), 1)
+	ACCUMULATE_OUTGOING_ARGS = $(call cc-if-fullversion, -lt, 040502, 1)
+  endif
+endif
+
+ifeq ($(ACCUMULATE_OUTGOING_ARGS), 1)
+	KBUILD_CFLAGS += -maccumulate-outgoing-args
+endif
+
 # Stackpointer is addressed different for 32 bit and 64 bit x86
 sp-$(CONFIG_X86_32) := esp
 sp-$(CONFIG_X86_64) := rsp
diff --git a/arch/x86/Makefile_32.cpu b/arch/x86/Makefile_32.cpu
index 6647ed49c66c..a45eb15b7cf2 100644
--- a/arch/x86/Makefile_32.cpu
+++ b/arch/x86/Makefile_32.cpu
@@ -45,24 +45,6 @@ cflags-$(CONFIG_MGEODE_LX) += $(call cc-option,-march=geode,-march=pentium-mmx)
 # cpu entries
 cflags-$(CONFIG_X86_GENERIC) += $(call tune,generic,$(call tune,i686))
 
-# Work around the pentium-mmx code generator madness of gcc4.4.x which
-# does stack alignment by generating horrible code _before_ the mcount
-# prologue (push %ebp, mov %esp, %ebp) which breaks the function graph
-# tracer assumptions. For i686, generic, core2 this is set by the
-# compiler anyway
-ifeq ($(CONFIG_FUNCTION_GRAPH_TRACER), y)
-ADD_ACCUMULATE_OUTGOING_ARGS := y
-endif
-
-# Work around to a bug with asm goto with first implementations of it
-# in gcc causing gcc to mess up the push and pop of the stack in some
-# uses of asm goto.
-ifeq ($(CONFIG_JUMP_LABEL), y)
-ADD_ACCUMULATE_OUTGOING_ARGS := y
-endif
-
-cflags-$(ADD_ACCUMULATE_OUTGOING_ARGS) += $(call cc-option,-maccumulate-outgoing-args)
-
 # Bug fix for binutils: this option is required in order to keep
 # binutils from generating NOPL instructions against our will.
 ifneq ($(CONFIG_X86_P6_NOP),y)
diff --git a/arch/x86/boot/compressed/error.c b/arch/x86/boot/compressed/error.c
index 6248740b68b5..31922023de49 100644
--- a/arch/x86/boot/compressed/error.c
+++ b/arch/x86/boot/compressed/error.c
@@ -4,6 +4,7 @@
  * memcpy() and memmove() are defined for the compressed boot environment.
  */
 #include "misc.h"
+#include "error.h"
 
 void warn(char *m)
 {
diff --git a/arch/x86/entry/vdso/vdso32-setup.c b/arch/x86/entry/vdso/vdso32-setup.c
index 7853b53959cd..3f9d1a83891a 100644
--- a/arch/x86/entry/vdso/vdso32-setup.c
+++ b/arch/x86/entry/vdso/vdso32-setup.c
@@ -30,8 +30,10 @@ static int __init vdso32_setup(char *s)
 {
 	vdso32_enabled = simple_strtoul(s, NULL, 0);
 
-	if (vdso32_enabled > 1)
+	if (vdso32_enabled > 1) {
 		pr_warn("vdso32 values other than 0 and 1 are no longer allowed; vdso disabled\n");
+		vdso32_enabled = 0;
+	}
 
 	return 1;
 }
@@ -62,13 +64,18 @@ subsys_initcall(sysenter_setup);
 /* Register vsyscall32 into the ABI table */
 #include <linux/sysctl.h>
 
+static const int zero;
+static const int one = 1;
+
 static struct ctl_table abi_table2[] = {
 	{
 		.procname	= "vsyscall32",
 		.data		= &vdso32_enabled,
 		.maxlen		= sizeof(int),
 		.mode		= 0644,
-		.proc_handler	= proc_dointvec
+		.proc_handler	= proc_dointvec_minmax,
+		.extra1		= (int *)&zero,
+		.extra2		= (int *)&one,
 	},
 	{}
 };
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 2aa1ad194db2..580b60f5ac83 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -2256,6 +2256,7 @@ void arch_perf_update_userpage(struct perf_event *event,
 			       struct perf_event_mmap_page *userpg, u64 now)
 {
 	struct cyc2ns_data *data;
+	u64 offset;
 
 	userpg->cap_user_time = 0;
 	userpg->cap_user_time_zero = 0;
@@ -2263,11 +2264,13 @@ void arch_perf_update_userpage(struct perf_event *event,
 		!!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED);
 	userpg->pmc_width = x86_pmu.cntval_bits;
 
-	if (!sched_clock_stable())
+	if (!using_native_sched_clock() || !sched_clock_stable())
 		return;
 
 	data = cyc2ns_read_begin();
 
+	offset = data->cyc2ns_offset + __sched_clock_offset;
+
 	/*
 	 * Internal timekeeping for enabled/running/stopped times
 	 * is always in the local_clock domain.
@@ -2275,7 +2278,7 @@ void arch_perf_update_userpage(struct perf_event *event,
 	userpg->cap_user_time = 1;
 	userpg->time_mult = data->cyc2ns_mul;
 	userpg->time_shift = data->cyc2ns_shift;
-	userpg->time_offset = data->cyc2ns_offset - now;
+	userpg->time_offset = offset - now;
 
 	/*
 	 * cap_user_time_zero doesn't make sense when we're using a different
@@ -2283,7 +2286,7 @@ void arch_perf_update_userpage(struct perf_event *event,
 	 */
 	if (!event->attr.use_clockid) {
 		userpg->cap_user_time_zero = 1;
-		userpg->time_zero = data->cyc2ns_offset;
+		userpg->time_zero = offset;
 	}
 
 	cyc2ns_read_end(data);
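
For context, the mult/shift/offset written here are exported to user space via struct perf_event_mmap_page, which lets a reader turn a raw TSC value into the kernel's local clock without a syscall. A minimal sketch of that arithmetic for x86-64 gcc/clang; the constants are purely illustrative, and only the (cyc * mult) >> shift form mirrors the kernel's cyc2ns conversion:

#include <stdint.h>
#include <stdio.h>
#include <x86intrin.h>

/* Stand-ins for the time_* fields of struct perf_event_mmap_page. */
struct userpage_time {
	uint16_t time_shift;
	uint32_t time_mult;
	uint64_t time_offset;	/* per the hunk above, now includes __sched_clock_offset */
};

/* ns = offset + (cyc * mult) >> shift, computed in 128 bits to avoid overflow. */
static uint64_t cyc_to_ns(const struct userpage_time *t, uint64_t cyc)
{
	return t->time_offset +
	       (uint64_t)(((__uint128_t)cyc * t->time_mult) >> t->time_shift);
}

int main(void)
{
	/* Illustrative values; real ones are read from the mmap'ed event page. */
	struct userpage_time t = { .time_shift = 10, .time_mult = 426, .time_offset = 0 };

	printf("clock estimate: %llu ns\n",
	       (unsigned long long)cyc_to_ns(&t, __rdtsc()));
	return 0;
}
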
diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c
index 81b321ace8e0..f924629836a8 100644
--- a/arch/x86/events/intel/lbr.c
+++ b/arch/x86/events/intel/lbr.c
@@ -507,6 +507,9 @@ static void intel_pmu_lbr_read_32(struct cpu_hw_events *cpuc)
 		cpuc->lbr_entries[i].to		= msr_lastbranch.to;
 		cpuc->lbr_entries[i].mispred	= 0;
 		cpuc->lbr_entries[i].predicted	= 0;
+		cpuc->lbr_entries[i].in_tx	= 0;
+		cpuc->lbr_entries[i].abort	= 0;
+		cpuc->lbr_entries[i].cycles	= 0;
 		cpuc->lbr_entries[i].reserved	= 0;
 	}
 	cpuc->lbr_stack.nr = i;
diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
index 9d49c18b5ea9..3762536619f8 100644
--- a/arch/x86/include/asm/elf.h
+++ b/arch/x86/include/asm/elf.h
@@ -287,7 +287,7 @@ struct task_struct;
 
 #define ARCH_DLINFO_IA32					\
 do {								\
-	if (vdso32_enabled) {					\
+	if (VDSO_CURRENT_BASE) {				\
 		NEW_AUX_ENT(AT_SYSINFO,	VDSO_ENTRY);		\
 		NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_CURRENT_BASE); \
 	}							\
diff --git a/arch/x86/include/asm/kvm_page_track.h b/arch/x86/include/asm/kvm_page_track.h
index d74747b031ec..c4eda791f877 100644
--- a/arch/x86/include/asm/kvm_page_track.h
+++ b/arch/x86/include/asm/kvm_page_track.h
@@ -46,6 +46,7 @@ struct kvm_page_track_notifier_node {
 };
 
 void kvm_page_track_init(struct kvm *kvm);
+void kvm_page_track_cleanup(struct kvm *kvm);
 
 void kvm_page_track_free_memslot(struct kvm_memory_slot *free,
 				 struct kvm_memory_slot *dont);
diff --git a/arch/x86/include/asm/pmem.h b/arch/x86/include/asm/pmem.h
index 2c1ebeb4d737..529bb4a6487a 100644
--- a/arch/x86/include/asm/pmem.h
+++ b/arch/x86/include/asm/pmem.h
@@ -55,7 +55,8 @@ static inline int arch_memcpy_from_pmem(void *dst, const void *src, size_t n)
  * @size:	number of bytes to write back
  *
  * Write back a cache range using the CLWB (cache line write back)
- * instruction.
+ * instruction. Note that @size is internally rounded up to be cache
+ * line size aligned.
  */
 static inline void arch_wb_cache_pmem(void *addr, size_t size)
 {
@@ -69,15 +70,6 @@ static inline void arch_wb_cache_pmem(void *addr, size_t size)
 		clwb(p);
 }
 
-/*
- * copy_from_iter_nocache() on x86 only uses non-temporal stores for iovec
- * iterators, so for other types (bvec & kvec) we must do a cache write-back.
- */
-static inline bool __iter_needs_pmem_wb(struct iov_iter *i)
-{
-	return iter_is_iovec(i) == false;
-}
-
 /**
  * arch_copy_from_iter_pmem - copy data from an iterator to PMEM
  * @addr:	PMEM destination address
@@ -94,7 +86,35 @@ static inline size_t arch_copy_from_iter_pmem(void *addr, size_t bytes,
 	/* TODO: skip the write-back by always using non-temporal stores */
 	len = copy_from_iter_nocache(addr, bytes, i);
 
-	if (__iter_needs_pmem_wb(i))
+	/*
+	 * In the iovec case on x86_64 copy_from_iter_nocache() uses
+	 * non-temporal stores for the bulk of the transfer, but we need
+	 * to manually flush if the transfer is unaligned. A cached
+	 * memory copy is used when destination or size is not naturally
+	 * aligned. That is:
+	 *   - Require 8-byte alignment when size is 8 bytes or larger.
+	 *   - Require 4-byte alignment when size is 4 bytes.
+	 *
+	 * In the non-iovec case the entire destination needs to be
+	 * flushed.
+	 */
+	if (iter_is_iovec(i)) {
+		unsigned long flushed, dest = (unsigned long) addr;
+
+		if (bytes < 8) {
+			if (!IS_ALIGNED(dest, 4) || (bytes != 4))
+				arch_wb_cache_pmem(addr, 1);
+		} else {
+			if (!IS_ALIGNED(dest, 8)) {
+				dest = ALIGN(dest, boot_cpu_data.x86_clflush_size);
+				arch_wb_cache_pmem(addr, 1);
+			}
+
+			flushed = dest - (unsigned long) addr;
+			if (bytes > flushed && !IS_ALIGNED(bytes - flushed, 8))
+				arch_wb_cache_pmem(addr + bytes - 1, 1);
+		}
+	} else
 		arch_wb_cache_pmem(addr, bytes);
 
 	return len;
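
The alignment rules in that comment collapse into a small predicate. A stand-alone C sketch of just the decision (the kernel flushes the head and tail cache lines individually; this only answers whether any flush is needed at all):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define IS_ALIGNED(x, a)	(((x) & ((a) - 1)) == 0)

/*
 * True when a non-temporal copy of @bytes to @dest leaves cached head or
 * tail bytes behind: 8-byte alignment is required for sizes >= 8, and a
 * small copy is only fully non-temporal when it is a 4-byte-aligned,
 * 4-byte store.
 */
static bool copy_needs_flush(uintptr_t dest, size_t bytes)
{
	if (bytes < 8)
		return !IS_ALIGNED(dest, 4) || bytes != 4;
	return !IS_ALIGNED(dest, 8) || !IS_ALIGNED(bytes, 8);
}

int main(void)
{
	printf("%d\n", copy_needs_flush(0x1000, 64));	/* 0: aligned start and size */
	printf("%d\n", copy_needs_flush(0x1004, 64));	/* 1: unaligned start */
	printf("%d\n", copy_needs_flush(0x1000, 4));	/* 0: the 4-byte special case */
	return 0;
}
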
diff --git a/arch/x86/include/asm/timer.h b/arch/x86/include/asm/timer.h
index a04eabd43d06..27e9f9d769b8 100644
--- a/arch/x86/include/asm/timer.h
+++ b/arch/x86/include/asm/timer.h
@@ -12,6 +12,8 @@ extern int recalibrate_cpu_khz(void);
 
 extern int no_timer_check;
 
+extern bool using_native_sched_clock(void);
+
 /*
  * We use the full linear equation: f(x) = a + b*x, in order to allow
  * a continuous function in the face of dynamic freq changes.
diff --git a/arch/x86/include/asm/uv/uv_hub.h b/arch/x86/include/asm/uv/uv_hub.h
index 72e8300b1e8a..9cffb44a3cf5 100644
--- a/arch/x86/include/asm/uv/uv_hub.h
+++ b/arch/x86/include/asm/uv/uv_hub.h
@@ -485,15 +485,17 @@ static inline unsigned long uv_soc_phys_ram_to_gpa(unsigned long paddr)
 
 	if (paddr < uv_hub_info->lowmem_remap_top)
 		paddr |= uv_hub_info->lowmem_remap_base;
-	paddr |= uv_hub_info->gnode_upper;
-	if (m_val)
+
+	if (m_val) {
+		paddr |= uv_hub_info->gnode_upper;
 		paddr = ((paddr << uv_hub_info->m_shift)
 			>> uv_hub_info->m_shift) |
 			((paddr >> uv_hub_info->m_val)
 			<< uv_hub_info->n_lshift);
-	else
+	} else {
 		paddr |= uv_soc_phys_ram_to_nasid(paddr)
 			<< uv_hub_info->gpa_shift;
+	}
 	return paddr;
 }
 
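
The bit splice in uv_soc_phys_ram_to_gpa() is easier to see in isolation: clear everything above the node-local offset, then re-insert the node bits at the GPA position. A C sketch with made-up field widths; the real m_val/m_shift/n_lshift come from the UV hub info:

#include <stdint.h>
#include <stdio.h>

#define M_VAL		26			/* node-local offset bits (hypothetical) */
#define M_SHIFT		(64 - M_VAL)		/* shift pair that clears the upper bits */
#define N_LSHIFT	39			/* where the node id lands in the GPA */

/* Same splice as the hunk above, minus the gnode_upper bias. */
static uint64_t soc_to_gpa(uint64_t paddr)
{
	return ((paddr << M_SHIFT) >> M_SHIFT) |
	       ((paddr >> M_VAL) << N_LSHIFT);
}

int main(void)
{
	uint64_t paddr = (3ULL << M_VAL) | 0x1000;	/* node 3, offset 0x1000 */

	printf("gpa = %#llx\n", (unsigned long long)soc_to_gpa(paddr));
	return 0;
}
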
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index e9f8f8cdd570..86f20cc0a65e 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -1105,7 +1105,8 @@ void __init uv_init_hub_info(struct uv_hub_info_s *hi)
 	node_id.v = uv_read_local_mmr(UVH_NODE_ID);
 	uv_cpuid.gnode_shift = max_t(unsigned int, uv_cpuid.gnode_shift, mn.n_val);
 	hi->gnode_extra = (node_id.s.node_id & ~((1 << uv_cpuid.gnode_shift) - 1)) >> 1;
-	hi->gnode_upper = (unsigned long)hi->gnode_extra << mn.m_val;
+	if (mn.m_val)
+		hi->gnode_upper = (u64)hi->gnode_extra << mn.m_val;
 
 	if (uv_gp_table) {
 		hi->global_mmr_base = uv_gp_table->mmr_base;
diff --git a/arch/x86/kernel/cpu/intel_rdt_schemata.c b/arch/x86/kernel/cpu/intel_rdt_schemata.c
index f369cb8db0d5..badd2b31a560 100644
--- a/arch/x86/kernel/cpu/intel_rdt_schemata.c
+++ b/arch/x86/kernel/cpu/intel_rdt_schemata.c
@@ -200,11 +200,11 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
 	}
 
 out:
-	rdtgroup_kn_unlock(of->kn);
 	for_each_enabled_rdt_resource(r) {
 		kfree(r->tmp_cbms);
 		r->tmp_cbms = NULL;
 	}
+	rdtgroup_kn_unlock(of->kn);
 	return ret ?: nbytes;
 }
 
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 8e9725c607ea..5accfbdee3f0 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -54,6 +54,8 @@
 
 static DEFINE_MUTEX(mce_chrdev_read_mutex);
 
+static int mce_chrdev_open_count;	/* #times opened */
+
 #define mce_log_get_idx_check(p) \
 ({ \
 	RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() && \
@@ -598,6 +600,10 @@ static int mce_default_notifier(struct notifier_block *nb, unsigned long val,
 	if (atomic_read(&num_notifiers) > 2)
 		return NOTIFY_DONE;
 
+	/* Don't print when mcelog is running */
+	if (mce_chrdev_open_count > 0)
+		return NOTIFY_DONE;
+
 	__print_mce(m);
 
 	return NOTIFY_DONE;
@@ -1828,7 +1834,6 @@ void mcheck_cpu_clear(struct cpuinfo_x86 *c)
  */
 
 static DEFINE_SPINLOCK(mce_chrdev_state_lock);
-static int mce_chrdev_open_count;	/* #times opened */
 static int mce_chrdev_open_exclu;	/* already open exclusive? */
 
 static int mce_chrdev_open(struct inode *inode, struct file *file)
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
index 524cc5780a77..6e4a047e4b68 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
@@ -60,7 +60,7 @@ static const char * const th_names[] = {
60 "load_store", 60 "load_store",
61 "insn_fetch", 61 "insn_fetch",
62 "combined_unit", 62 "combined_unit",
63 "", 63 "decode_unit",
64 "northbridge", 64 "northbridge",
65 "execution_unit", 65 "execution_unit",
66}; 66};
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 8f3d9cf26ff9..cbd73eb42170 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -29,6 +29,12 @@
 #include <asm/ftrace.h>
 #include <asm/nops.h>
 
+#if defined(CONFIG_FUNCTION_GRAPH_TRACER) && \
+	!defined(CC_USING_FENTRY) && \
+	!defined(CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE)
+# error The following combination is not supported: ((compiler missing -mfentry) || (CONFIG_X86_32 and !CONFIG_DYNAMIC_FTRACE)) && CONFIG_FUNCTION_GRAPH_TRACER && CONFIG_CC_OPTIMIZE_FOR_SIZE
+#endif
+
 #ifdef CONFIG_DYNAMIC_FTRACE
 
 int ftrace_arch_code_modify_prepare(void)
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index 396c042e9d0e..cc30a74e4adb 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -846,7 +846,7 @@ void signal_fault(struct pt_regs *regs, void __user *frame, char *where)
 		       task_pid_nr(current) > 1 ? KERN_INFO : KERN_EMERG,
 		       me->comm, me->pid, where, frame,
 		       regs->ip, regs->sp, regs->orig_ax);
-		print_vma_addr(" in ", regs->ip);
+		print_vma_addr(KERN_CONT " in ", regs->ip);
 		pr_cont("\n");
 	}
 
diff --git a/arch/x86/kernel/signal_compat.c b/arch/x86/kernel/signal_compat.c
index ec1f756f9dc9..71beb28600d4 100644
--- a/arch/x86/kernel/signal_compat.c
+++ b/arch/x86/kernel/signal_compat.c
@@ -151,8 +151,8 @@ int __copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from,
 
 	if (from->si_signo == SIGSEGV) {
 		if (from->si_code == SEGV_BNDERR) {
-			compat_uptr_t lower = (unsigned long)&to->si_lower;
-			compat_uptr_t upper = (unsigned long)&to->si_upper;
+			compat_uptr_t lower = (unsigned long)from->si_lower;
+			compat_uptr_t upper = (unsigned long)from->si_upper;
 			put_user_ex(lower, &to->si_lower);
 			put_user_ex(upper, &to->si_upper);
 		}
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 948443e115c1..4e496379a871 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -255,7 +255,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
 		pr_info("%s[%d] trap %s ip:%lx sp:%lx error:%lx",
 			tsk->comm, tsk->pid, str,
 			regs->ip, regs->sp, error_code);
-		print_vma_addr(" in ", regs->ip);
+		print_vma_addr(KERN_CONT " in ", regs->ip);
 		pr_cont("\n");
 	}
 
@@ -519,7 +519,7 @@ do_general_protection(struct pt_regs *regs, long error_code)
 		pr_info("%s[%d] general protection ip:%lx sp:%lx error:%lx",
 			tsk->comm, task_pid_nr(tsk),
 			regs->ip, regs->sp, error_code);
-		print_vma_addr(" in ", regs->ip);
+		print_vma_addr(KERN_CONT " in ", regs->ip);
 		pr_cont("\n");
 	}
 
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index c73a7f9e881a..714dfba6a1e7 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -328,7 +328,7 @@ unsigned long long sched_clock(void)
 		return paravirt_sched_clock();
 }
 
-static inline bool using_native_sched_clock(void)
+bool using_native_sched_clock(void)
 {
 	return pv_time_ops.sched_clock == native_sched_clock;
 }
@@ -336,7 +336,7 @@ static inline bool using_native_sched_clock(void)
 unsigned long long
 sched_clock(void) __attribute__((alias("native_sched_clock")));
 
-static inline bool using_native_sched_clock(void) { return true; }
+bool using_native_sched_clock(void) { return true; }
 #endif
 
 int check_tsc_unstable(void)
diff --git a/arch/x86/kvm/i8259.c b/arch/x86/kvm/i8259.c
index 73ea24d4f119..047b17a26269 100644
--- a/arch/x86/kvm/i8259.c
+++ b/arch/x86/kvm/i8259.c
@@ -657,6 +657,9 @@ void kvm_pic_destroy(struct kvm *kvm)
 {
 	struct kvm_pic *vpic = kvm->arch.vpic;
 
+	if (!vpic)
+		return;
+
 	kvm_io_bus_unregister_dev(vpic->kvm, KVM_PIO_BUS, &vpic->dev_master);
 	kvm_io_bus_unregister_dev(vpic->kvm, KVM_PIO_BUS, &vpic->dev_slave);
 	kvm_io_bus_unregister_dev(vpic->kvm, KVM_PIO_BUS, &vpic->dev_eclr);
diff --git a/arch/x86/kvm/ioapic.c b/arch/x86/kvm/ioapic.c
index 6e219e5c07d2..289270a6aecb 100644
--- a/arch/x86/kvm/ioapic.c
+++ b/arch/x86/kvm/ioapic.c
@@ -635,6 +635,9 @@ void kvm_ioapic_destroy(struct kvm *kvm)
 {
 	struct kvm_ioapic *ioapic = kvm->arch.vioapic;
 
+	if (!ioapic)
+		return;
+
 	cancel_delayed_work_sync(&ioapic->eoi_inject);
 	kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &ioapic->dev);
 	kvm->arch.vioapic = NULL;
diff --git a/arch/x86/kvm/page_track.c b/arch/x86/kvm/page_track.c
index 37942e419c32..60168cdd0546 100644
--- a/arch/x86/kvm/page_track.c
+++ b/arch/x86/kvm/page_track.c
@@ -160,6 +160,14 @@ bool kvm_page_track_is_active(struct kvm_vcpu *vcpu, gfn_t gfn,
 	return !!ACCESS_ONCE(slot->arch.gfn_track[mode][index]);
 }
 
+void kvm_page_track_cleanup(struct kvm *kvm)
+{
+	struct kvm_page_track_notifier_head *head;
+
+	head = &kvm->arch.track_notifier_head;
+	cleanup_srcu_struct(&head->track_srcu);
+}
+
 void kvm_page_track_init(struct kvm *kvm)
 {
 	struct kvm_page_track_notifier_head *head;
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index d1efe2c62b3f..5fba70646c32 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1379,6 +1379,9 @@ static void avic_vm_destroy(struct kvm *kvm)
 	unsigned long flags;
 	struct kvm_arch *vm_data = &kvm->arch;
 
+	if (!avic)
+		return;
+
 	avic_free_vm_id(vm_data->avic_vm_id);
 
 	if (vm_data->avic_logical_id_table_page)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 98e82ee1e699..259e9b28ccf8 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1239,6 +1239,11 @@ static inline bool cpu_has_vmx_invvpid_global(void)
 	return vmx_capability.vpid & VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT;
 }
 
+static inline bool cpu_has_vmx_invvpid(void)
+{
+	return vmx_capability.vpid & VMX_VPID_INVVPID_BIT;
+}
+
 static inline bool cpu_has_vmx_ept(void)
 {
 	return vmcs_config.cpu_based_2nd_exec_ctrl &
@@ -2753,7 +2758,6 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
 		SECONDARY_EXEC_RDTSCP |
 		SECONDARY_EXEC_DESC |
 		SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
-		SECONDARY_EXEC_ENABLE_VPID |
 		SECONDARY_EXEC_APIC_REGISTER_VIRT |
 		SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
 		SECONDARY_EXEC_WBINVD_EXITING |
@@ -2781,10 +2785,12 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
 	 * though it is treated as global context. The alternative is
 	 * not failing the single-context invvpid, and it is worse.
 	 */
-	if (enable_vpid)
+	if (enable_vpid) {
+		vmx->nested.nested_vmx_secondary_ctls_high |=
+			SECONDARY_EXEC_ENABLE_VPID;
 		vmx->nested.nested_vmx_vpid_caps = VMX_VPID_INVVPID_BIT |
 			VMX_VPID_EXTENT_SUPPORTED_MASK;
-	else
+	} else
 		vmx->nested.nested_vmx_vpid_caps = 0;
 
 	if (enable_unrestricted_guest)
@@ -4024,6 +4030,12 @@ static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
 	__vmx_flush_tlb(vcpu, to_vmx(vcpu)->vpid);
 }
 
+static void vmx_flush_tlb_ept_only(struct kvm_vcpu *vcpu)
+{
+	if (enable_ept)
+		vmx_flush_tlb(vcpu);
+}
+
 static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
 {
 	ulong cr0_guest_owned_bits = vcpu->arch.cr0_guest_owned_bits;
@@ -6517,8 +6529,10 @@ static __init int hardware_setup(void)
 	if (boot_cpu_has(X86_FEATURE_NX))
 		kvm_enable_efer_bits(EFER_NX);
 
-	if (!cpu_has_vmx_vpid())
+	if (!cpu_has_vmx_vpid() || !cpu_has_vmx_invvpid() ||
+	    !(cpu_has_vmx_invvpid_single() || cpu_has_vmx_invvpid_global()))
 		enable_vpid = 0;
+
 	if (!cpu_has_vmx_shadow_vmcs())
 		enable_shadow_vmcs = 0;
 	if (enable_shadow_vmcs)
@@ -8184,6 +8198,9 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
 		return nested_cpu_has2(vmcs12, SECONDARY_EXEC_XSAVES);
 	case EXIT_REASON_PREEMPTION_TIMER:
 		return false;
+	case EXIT_REASON_PML_FULL:
+		/* We don't expose PML support to L1. */
+		return false;
 	default:
 		return true;
 	}
@@ -8501,7 +8518,8 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu)
 	    && kvm_vmx_exit_handlers[exit_reason])
 		return kvm_vmx_exit_handlers[exit_reason](vcpu);
 	else {
-		WARN_ONCE(1, "vmx: unexpected exit reason 0x%x\n", exit_reason);
+		vcpu_unimpl(vcpu, "vmx: unexpected exit reason 0x%x\n",
+				exit_reason);
 		kvm_queue_exception(vcpu, UD_VECTOR);
 		return 1;
 	}
@@ -8547,6 +8565,7 @@ static void vmx_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set)
 	} else {
 		sec_exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
 		sec_exec_control |= SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
+		vmx_flush_tlb_ept_only(vcpu);
 	}
 	vmcs_write32(SECONDARY_VM_EXEC_CONTROL, sec_exec_control);
 
@@ -8572,8 +8591,10 @@ static void vmx_set_apic_access_page_addr(struct kvm_vcpu *vcpu, hpa_t hpa)
 	 */
 	if (!is_guest_mode(vcpu) ||
 	    !nested_cpu_has2(get_vmcs12(&vmx->vcpu),
-			     SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
+			     SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
 		vmcs_write64(APIC_ACCESS_ADDR, hpa);
+		vmx_flush_tlb_ept_only(vcpu);
+	}
 }
 
 static void vmx_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr)
@@ -9974,7 +9995,6 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	u32 exec_control;
-	bool nested_ept_enabled = false;
 
 	vmcs_write16(GUEST_ES_SELECTOR, vmcs12->guest_es_selector);
 	vmcs_write16(GUEST_CS_SELECTOR, vmcs12->guest_cs_selector);
@@ -10121,8 +10141,6 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
 					vmcs12->guest_intr_status);
 	}
 
-	nested_ept_enabled = (exec_control & SECONDARY_EXEC_ENABLE_EPT) != 0;
-
 	/*
 	 * Write an illegal value to APIC_ACCESS_ADDR. Later,
 	 * nested_get_vmcs12_pages will either fix it up or
@@ -10252,9 +10270,24 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
 
 	}
 
+	if (enable_pml) {
+		/*
+		 * Conceptually we want to copy the PML address and index from
+		 * vmcs01 here, and then back to vmcs01 on nested vmexit. But,
+		 * since we always flush the log on each vmexit, this happens
+		 * to be equivalent to simply resetting the fields in vmcs02.
+		 */
+		ASSERT(vmx->pml_pg);
+		vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg));
+		vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);
+	}
+
 	if (nested_cpu_has_ept(vmcs12)) {
 		kvm_mmu_unload(vcpu);
 		nested_ept_init_mmu_context(vcpu);
+	} else if (nested_cpu_has2(vmcs12,
+		   SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
+		vmx_flush_tlb_ept_only(vcpu);
 	}
 
 	/*
@@ -10282,12 +10315,10 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
 	vmx_set_efer(vcpu, vcpu->arch.efer);
 
 	/* Shadow page tables on either EPT or shadow page tables. */
-	if (nested_vmx_load_cr3(vcpu, vmcs12->guest_cr3, nested_ept_enabled,
+	if (nested_vmx_load_cr3(vcpu, vmcs12->guest_cr3, nested_cpu_has_ept(vmcs12),
 				entry_failure_code))
 		return 1;
 
-	kvm_mmu_reset_context(vcpu);
-
 	if (!enable_ept)
 		vcpu->arch.walk_mmu->inject_page_fault = vmx_inject_page_fault_nested;
 
@@ -11056,6 +11087,10 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
 		vmx->nested.change_vmcs01_virtual_x2apic_mode = false;
 		vmx_set_virtual_x2apic_mode(vcpu,
 				vcpu->arch.apic_base & X2APIC_ENABLE);
+	} else if (!nested_cpu_has_ept(vmcs12) &&
+		   nested_cpu_has2(vmcs12,
+				   SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
+		vmx_flush_tlb_ept_only(vcpu);
 	}
 
 	/* This is needed for same reason as it was needed in prepare_vmcs02 */
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 1faf620a6fdc..ccbd45ecd41a 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -8153,11 +8153,12 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
 	if (kvm_x86_ops->vm_destroy)
 		kvm_x86_ops->vm_destroy(kvm);
 	kvm_iommu_unmap_guest(kvm);
-	kfree(kvm->arch.vpic);
-	kfree(kvm->arch.vioapic);
+	kvm_pic_destroy(kvm);
+	kvm_ioapic_destroy(kvm);
 	kvm_free_vcpus(kvm);
 	kvfree(rcu_dereference_check(kvm->arch.apic_map, 1));
 	kvm_mmu_uninit_vm(kvm);
+	kvm_page_track_cleanup(kvm);
 }
 
 void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
@@ -8566,11 +8567,11 @@ void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
 {
 	struct x86_exception fault;
 
-	trace_kvm_async_pf_ready(work->arch.token, work->gva);
 	if (work->wakeup_all)
 		work->arch.token = ~0; /* broadcast wakeup */
 	else
 		kvm_del_async_pf_gfn(vcpu, work->arch.gfn);
+	trace_kvm_async_pf_ready(work->arch.token, work->gva);
 
 	if ((vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) &&
 	    !apf_put_user(vcpu, KVM_PV_REASON_PAGE_READY)) {
diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
index 779782f58324..9a53a06e5a3e 100644
--- a/arch/x86/lib/memcpy_64.S
+++ b/arch/x86/lib/memcpy_64.S
@@ -290,7 +290,7 @@ EXPORT_SYMBOL_GPL(memcpy_mcsafe_unrolled)
 	_ASM_EXTABLE_FAULT(.L_copy_leading_bytes, .L_memcpy_mcsafe_fail)
 	_ASM_EXTABLE_FAULT(.L_cache_w0, .L_memcpy_mcsafe_fail)
 	_ASM_EXTABLE_FAULT(.L_cache_w1, .L_memcpy_mcsafe_fail)
-	_ASM_EXTABLE_FAULT(.L_cache_w3, .L_memcpy_mcsafe_fail)
+	_ASM_EXTABLE_FAULT(.L_cache_w2, .L_memcpy_mcsafe_fail)
 	_ASM_EXTABLE_FAULT(.L_cache_w3, .L_memcpy_mcsafe_fail)
 	_ASM_EXTABLE_FAULT(.L_cache_w4, .L_memcpy_mcsafe_fail)
 	_ASM_EXTABLE_FAULT(.L_cache_w5, .L_memcpy_mcsafe_fail)
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 22af912d66d2..889e7619a091 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -643,21 +643,40 @@ void __init init_mem_mapping(void)
  * devmem_is_allowed() checks to see if /dev/mem access to a certain address
  * is valid. The argument is a physical page number.
  *
- *
- * On x86, access has to be given to the first megabyte of ram because that area
- * contains BIOS code and data regions used by X and dosemu and similar apps.
- * Access has to be given to non-kernel-ram areas as well, these contain the PCI
- * mmio resources as well as potential bios/acpi data regions.
+ * On x86, access has to be given to the first megabyte of RAM because that
+ * area traditionally contains BIOS code and data regions used by X, dosemu,
+ * and similar apps. Since they map the entire memory range, the whole range
+ * must be allowed (for mapping), but any areas that would otherwise be
+ * disallowed are flagged as being "zero filled" instead of rejected.
+ * Access has to be given to non-kernel-ram areas as well, these contain the
+ * PCI mmio resources as well as potential bios/acpi data regions.
  */
 int devmem_is_allowed(unsigned long pagenr)
 {
-	if (pagenr < 256)
-		return 1;
-	if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
+	if (page_is_ram(pagenr)) {
+		/*
+		 * For disallowed memory regions in the low 1MB range,
+		 * request that the page be shown as all zeros.
+		 */
+		if (pagenr < 256)
+			return 2;
+
+		return 0;
+	}
+
+	/*
+	 * This must follow RAM test, since System RAM is considered a
+	 * restricted resource under CONFIG_STRICT_IOMEM.
+	 */
+	if (iomem_is_exclusive(pagenr << PAGE_SHIFT)) {
+		/* Low 1MB bypasses iomem restrictions. */
+		if (pagenr < 256)
+			return 1;
+
 		return 0;
-	if (!page_is_ram(pagenr))
-		return 1;
-	return 0;
+	}
+
+	return 1;
 }
 
 void free_init_pages(char *what, unsigned long begin, unsigned long end)
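
Note the function's new contract above: three results instead of a boolean. A small C model of how a /dev/mem-style caller could act on them; the enum and the policy strings are hypothetical, only the 0/1/2 values match the hunk:

#include <stdio.h>

enum devmem_access {
	DEVMEM_REJECT = 0,	/* fail the access */
	DEVMEM_ALLOW  = 1,	/* map the real page */
	DEVMEM_ZERO   = 2,	/* allow, but substitute an all-zero page */
};

static const char *devmem_policy(int rc)
{
	switch (rc) {
	case DEVMEM_ALLOW:	return "map the real page";
	case DEVMEM_ZERO:	return "show an all-zero page";
	default:		return "fail with -EPERM";
	}
}

int main(void)
{
	for (int rc = 0; rc <= 2; rc++)
		printf("devmem_is_allowed() == %d -> %s\n", rc, devmem_policy(rc));
	return 0;
}
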
diff --git a/arch/x86/mm/kaslr.c b/arch/x86/mm/kaslr.c
index 887e57182716..aed206475aa7 100644
--- a/arch/x86/mm/kaslr.c
+++ b/arch/x86/mm/kaslr.c
@@ -48,7 +48,7 @@ static const unsigned long vaddr_start = __PAGE_OFFSET_BASE;
 #if defined(CONFIG_X86_ESPFIX64)
 static const unsigned long vaddr_end = ESPFIX_BASE_ADDR;
 #elif defined(CONFIG_EFI)
-static const unsigned long vaddr_end = EFI_VA_START;
+static const unsigned long vaddr_end = EFI_VA_END;
 #else
 static const unsigned long vaddr_end = __START_KERNEL_map;
 #endif
@@ -105,7 +105,7 @@ void __init kernel_randomize_memory(void)
 	 */
 	BUILD_BUG_ON(vaddr_start >= vaddr_end);
 	BUILD_BUG_ON(IS_ENABLED(CONFIG_X86_ESPFIX64) &&
-		     vaddr_end >= EFI_VA_START);
+		     vaddr_end >= EFI_VA_END);
 	BUILD_BUG_ON((IS_ENABLED(CONFIG_X86_ESPFIX64) ||
 		      IS_ENABLED(CONFIG_EFI)) &&
 		     vaddr_end >= __START_KERNEL_map);
diff --git a/arch/x86/platform/efi/quirks.c b/arch/x86/platform/efi/quirks.c
index 30031d5293c4..cdfe8c628959 100644
--- a/arch/x86/platform/efi/quirks.c
+++ b/arch/x86/platform/efi/quirks.c
@@ -201,6 +201,10 @@ void __init efi_arch_mem_reserve(phys_addr_t addr, u64 size)
 		return;
 	}
 
+	/* No need to reserve regions that will never be freed. */
+	if (md.attribute & EFI_MEMORY_RUNTIME)
+		return;
+
 	size += addr % EFI_PAGE_SIZE;
 	size = round_up(size, EFI_PAGE_SIZE);
 	addr = round_down(addr, EFI_PAGE_SIZE);
diff --git a/arch/x86/purgatory/Makefile b/arch/x86/purgatory/Makefile
index 555b9fa0ad43..7dbdb780264d 100644
--- a/arch/x86/purgatory/Makefile
+++ b/arch/x86/purgatory/Makefile
@@ -8,6 +8,7 @@ PURGATORY_OBJS = $(addprefix $(obj)/,$(purgatory-y))
 LDFLAGS_purgatory.ro := -e purgatory_start -r --no-undefined -nostdlib -z nodefaultlib
 targets += purgatory.ro
 
+KASAN_SANITIZE	:= n
 KCOV_INSTRUMENT := n
 
 # Default KBUILD_CFLAGS can have -pg option set when FTRACE is enabled. That
diff --git a/arch/xtensa/include/asm/page.h b/arch/xtensa/include/asm/page.h
index 976b1d70edbc..4ddbfd57a7c8 100644
--- a/arch/xtensa/include/asm/page.h
+++ b/arch/xtensa/include/asm/page.h
@@ -164,8 +164,21 @@ void copy_user_highpage(struct page *to, struct page *from,
 
 #define ARCH_PFN_OFFSET		(PHYS_OFFSET >> PAGE_SHIFT)
 
+#ifdef CONFIG_MMU
+static inline unsigned long ___pa(unsigned long va)
+{
+	unsigned long off = va - PAGE_OFFSET;
+
+	if (off >= XCHAL_KSEG_SIZE)
+		off -= XCHAL_KSEG_SIZE;
+
+	return off + PHYS_OFFSET;
+}
+#define __pa(x)	___pa((unsigned long)(x))
+#else
 #define __pa(x)	\
 	((unsigned long) (x) - PAGE_OFFSET + PHYS_OFFSET)
+#endif
 #define __va(x)	\
 	((void *)((unsigned long) (x) - PHYS_OFFSET + PAGE_OFFSET))
 #define pfn_valid(pfn) \
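
The new ___pa() covers Xtensa configurations where the kernel maps RAM twice, once cached and once uncached, so two KSEG virtual windows fold onto one physical range. A user-space C sketch of the fold; all three constants are illustrative stand-ins for the core's real configuration:

#include <stdint.h>
#include <stdio.h>

#define PAGE_OFFSET	0xd0000000UL	/* illustrative */
#define PHYS_OFFSET	0x00000000UL	/* illustrative */
#define XCHAL_KSEG_SIZE	0x08000000UL	/* 128 MiB per KSEG window (illustrative) */

/* Same fold as ___pa(): the second (uncached) window aliases the first. */
static unsigned long xtensa_pa(unsigned long va)
{
	unsigned long off = va - PAGE_OFFSET;

	if (off >= XCHAL_KSEG_SIZE)
		off -= XCHAL_KSEG_SIZE;

	return off + PHYS_OFFSET;
}

int main(void)
{
	printf("%#lx\n", xtensa_pa(0xd0001000UL));	/* cached KSEG   -> 0x1000 */
	printf("%#lx\n", xtensa_pa(0xd8001000UL));	/* uncached KSEG -> 0x1000 */
	return 0;
}
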
diff --git a/arch/xtensa/include/uapi/asm/unistd.h b/arch/xtensa/include/uapi/asm/unistd.h
index cd400af4a6b2..6be7eb27fd29 100644
--- a/arch/xtensa/include/uapi/asm/unistd.h
+++ b/arch/xtensa/include/uapi/asm/unistd.h
@@ -774,7 +774,10 @@ __SYSCALL(349, sys_pkey_alloc, 2)
 #define __NR_pkey_free				350
 __SYSCALL(350, sys_pkey_free, 1)
 
-#define __NR_syscall_count			351
+#define __NR_statx				351
+__SYSCALL(351, sys_statx, 5)
+
+#define __NR_syscall_count			352
 
 /*
  * sysxtensa syscall handler
diff --git a/arch/xtensa/kernel/traps.c b/arch/xtensa/kernel/traps.c
index c82c43bff296..bae697a06a98 100644
--- a/arch/xtensa/kernel/traps.c
+++ b/arch/xtensa/kernel/traps.c
@@ -483,10 +483,8 @@ void show_regs(struct pt_regs * regs)
 
 static int show_trace_cb(struct stackframe *frame, void *data)
 {
-	if (kernel_text_address(frame->pc)) {
-		pr_cont(" [<%08lx>]", frame->pc);
-		print_symbol(" %s\n", frame->pc);
-	}
+	if (kernel_text_address(frame->pc))
+		pr_cont(" [<%08lx>] %pB\n", frame->pc, (void *)frame->pc);
 	return 0;
 }
 
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index 09af8ff18719..c974a1bbf4cb 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -171,7 +171,8 @@ void blk_mq_sched_put_request(struct request *rq)
 
 void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
 {
-	struct elevator_queue *e = hctx->queue->elevator;
+	struct request_queue *q = hctx->queue;
+	struct elevator_queue *e = q->elevator;
 	const bool has_sched_dispatch = e && e->type->ops.mq.dispatch_request;
 	bool did_work = false;
 	LIST_HEAD(rq_list);
@@ -203,10 +204,10 @@ void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
 	 */
 	if (!list_empty(&rq_list)) {
 		blk_mq_sched_mark_restart_hctx(hctx);
-		did_work = blk_mq_dispatch_rq_list(hctx, &rq_list);
+		did_work = blk_mq_dispatch_rq_list(q, &rq_list);
 	} else if (!has_sched_dispatch) {
 		blk_mq_flush_busy_ctxs(hctx, &rq_list);
-		blk_mq_dispatch_rq_list(hctx, &rq_list);
+		blk_mq_dispatch_rq_list(q, &rq_list);
 	}
 
 	/*
@@ -222,7 +223,7 @@ void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
 			if (!rq)
 				break;
 			list_add(&rq->queuelist, &rq_list);
-		} while (blk_mq_dispatch_rq_list(hctx, &rq_list));
+		} while (blk_mq_dispatch_rq_list(q, &rq_list));
 	}
 }
 
@@ -317,25 +318,68 @@ static bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx,
 	return true;
 }
 
-static void blk_mq_sched_restart_hctx(struct blk_mq_hw_ctx *hctx)
+static bool blk_mq_sched_restart_hctx(struct blk_mq_hw_ctx *hctx)
 {
 	if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) {
 		clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
-		if (blk_mq_hctx_has_pending(hctx))
+		if (blk_mq_hctx_has_pending(hctx)) {
 			blk_mq_run_hw_queue(hctx, true);
+			return true;
+		}
 	}
+	return false;
 }
 
-void blk_mq_sched_restart_queues(struct blk_mq_hw_ctx *hctx)
-{
-	struct request_queue *q = hctx->queue;
-	unsigned int i;
+/**
+ * list_for_each_entry_rcu_rr - iterate in a round-robin fashion over rcu list
+ * @pos:    loop cursor.
+ * @skip:   the list element that will not be examined. Iteration starts at
+ *          @skip->next.
+ * @head:   head of the list to examine. This list must have at least one
+ *          element, namely @skip.
+ * @member: name of the list_head structure within typeof(*pos).
+ */
+#define list_for_each_entry_rcu_rr(pos, skip, head, member)		\
+	for ((pos) = (skip);						\
+	     (pos = (pos)->member.next != (head) ? list_entry_rcu(	\
+			(pos)->member.next, typeof(*pos), member) :	\
+	      list_entry_rcu((pos)->member.next->next, typeof(*pos), member)), \
+	     (pos) != (skip); )
 
-	if (test_bit(QUEUE_FLAG_RESTART, &q->queue_flags)) {
-		if (test_and_clear_bit(QUEUE_FLAG_RESTART, &q->queue_flags)) {
-			queue_for_each_hw_ctx(q, hctx, i)
-				blk_mq_sched_restart_hctx(hctx);
+/*
+ * Called after a driver tag has been freed to check whether a hctx needs to
+ * be restarted. Restarts @hctx if its tag set is not shared. Restarts hardware
+ * queues in a round-robin fashion if the tag set of @hctx is shared with other
+ * hardware queues.
+ */
+void blk_mq_sched_restart(struct blk_mq_hw_ctx *const hctx)
+{
+	struct blk_mq_tags *const tags = hctx->tags;
+	struct blk_mq_tag_set *const set = hctx->queue->tag_set;
+	struct request_queue *const queue = hctx->queue, *q;
+	struct blk_mq_hw_ctx *hctx2;
+	unsigned int i, j;
+
+	if (set->flags & BLK_MQ_F_TAG_SHARED) {
+		rcu_read_lock();
+		list_for_each_entry_rcu_rr(q, queue, &set->tag_list,
+					   tag_set_list) {
+			queue_for_each_hw_ctx(q, hctx2, i)
+				if (hctx2->tags == tags &&
+				    blk_mq_sched_restart_hctx(hctx2))
+					goto done;
+		}
+		j = hctx->queue_num + 1;
+		for (i = 0; i < queue->nr_hw_queues; i++, j++) {
+			if (j == queue->nr_hw_queues)
+				j = 0;
+			hctx2 = queue->queue_hw_ctx[j];
+			if (hctx2->tags == tags &&
+			    blk_mq_sched_restart_hctx(hctx2))
+				break;
 		}
+done:
+		rcu_read_unlock();
 	} else {
 		blk_mq_sched_restart_hctx(hctx);
 	}
@@ -431,11 +475,67 @@ static void blk_mq_sched_free_tags(struct blk_mq_tag_set *set,
431 } 475 }
432} 476}
433 477
434int blk_mq_sched_setup(struct request_queue *q) 478static int blk_mq_sched_alloc_tags(struct request_queue *q,
479 struct blk_mq_hw_ctx *hctx,
480 unsigned int hctx_idx)
481{
482 struct blk_mq_tag_set *set = q->tag_set;
483 int ret;
484
485 hctx->sched_tags = blk_mq_alloc_rq_map(set, hctx_idx, q->nr_requests,
486 set->reserved_tags);
487 if (!hctx->sched_tags)
488 return -ENOMEM;
489
490 ret = blk_mq_alloc_rqs(set, hctx->sched_tags, hctx_idx, q->nr_requests);
491 if (ret)
492 blk_mq_sched_free_tags(set, hctx, hctx_idx);
493
494 return ret;
495}
496
497static void blk_mq_sched_tags_teardown(struct request_queue *q)
435{ 498{
436 struct blk_mq_tag_set *set = q->tag_set; 499 struct blk_mq_tag_set *set = q->tag_set;
437 struct blk_mq_hw_ctx *hctx; 500 struct blk_mq_hw_ctx *hctx;
438 int ret, i; 501 int i;
502
503 queue_for_each_hw_ctx(q, hctx, i)
504 blk_mq_sched_free_tags(set, hctx, i);
505}
506
507int blk_mq_sched_init_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
508 unsigned int hctx_idx)
509{
510 struct elevator_queue *e = q->elevator;
511
512 if (!e)
513 return 0;
514
515 return blk_mq_sched_alloc_tags(q, hctx, hctx_idx);
516}
517
518void blk_mq_sched_exit_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
519 unsigned int hctx_idx)
520{
521 struct elevator_queue *e = q->elevator;
522
523 if (!e)
524 return;
525
526 blk_mq_sched_free_tags(q->tag_set, hctx, hctx_idx);
527}
528
529int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
530{
531 struct blk_mq_hw_ctx *hctx;
532 unsigned int i;
533 int ret;
534
535 if (!e) {
536 q->elevator = NULL;
537 return 0;
538 }
439 539
440 /* 540 /*
441 * Default to 256, since we don't split into sync/async like the 541 * Default to 256, since we don't split into sync/async like the
@@ -443,49 +543,30 @@ int blk_mq_sched_setup(struct request_queue *q)
443 */ 543 */
444 q->nr_requests = 2 * BLKDEV_MAX_RQ; 544 q->nr_requests = 2 * BLKDEV_MAX_RQ;
445 545
446 /*
447 * We're switching to using an IO scheduler, so setup the hctx
448 * scheduler tags and switch the request map from the regular
449 * tags to scheduler tags. First allocate what we need, so we
450 * can safely fail and fallback, if needed.
451 */
452 ret = 0;
453 queue_for_each_hw_ctx(q, hctx, i) { 546 queue_for_each_hw_ctx(q, hctx, i) {
454 hctx->sched_tags = blk_mq_alloc_rq_map(set, i, 547 ret = blk_mq_sched_alloc_tags(q, hctx, i);
455 q->nr_requests, set->reserved_tags);
456 if (!hctx->sched_tags) {
457 ret = -ENOMEM;
458 break;
459 }
460 ret = blk_mq_alloc_rqs(set, hctx->sched_tags, i, q->nr_requests);
461 if (ret) 548 if (ret)
462 break; 549 goto err;
463 } 550 }
464 551
465 /* 552 ret = e->ops.mq.init_sched(q, e);
466 * If we failed, free what we did allocate 553 if (ret)
467 */ 554 goto err;
468 if (ret) {
469 queue_for_each_hw_ctx(q, hctx, i) {
470 if (!hctx->sched_tags)
471 continue;
472 blk_mq_sched_free_tags(set, hctx, i);
473 }
474
475 return ret;
476 }
477 555
478 return 0; 556 return 0;
557
558err:
559 blk_mq_sched_tags_teardown(q);
560 q->elevator = NULL;
561 return ret;
479} 562}
480 563
481void blk_mq_sched_teardown(struct request_queue *q) 564void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e)
482{ 565{
483 struct blk_mq_tag_set *set = q->tag_set; 566 if (e->type->ops.mq.exit_sched)
484 struct blk_mq_hw_ctx *hctx; 567 e->type->ops.mq.exit_sched(e);
485 int i; 568 blk_mq_sched_tags_teardown(q);
486 569 q->elevator = NULL;
487 queue_for_each_hw_ctx(q, hctx, i)
488 blk_mq_sched_free_tags(set, hctx, i);
489} 570}
490 571
491int blk_mq_sched_init(struct request_queue *q) 572int blk_mq_sched_init(struct request_queue *q)
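
blk_mq_init_sched() above folds the old two-step setup into one call with a single error label: allocate scheduler tags for every hctx, run the elevator's init_sched(), and on any failure tear everything down via blk_mq_sched_tags_teardown(). The generic shape of that unwind, as a self-contained sketch (the resource type and sizes are placeholders, not kernel types):

#include <stdlib.h>

#define NR_HCTX 4

static int init_all(void *res[NR_HCTX])
{
	int i;

	for (i = 0; i < NR_HCTX; i++) {
		res[i] = malloc(64);
		if (!res[i])
			goto err;
	}
	return 0;
err:
	while (i--)		/* unwind only what was allocated */
		free(res[i]);
	return -1;
}

int main(void)
{
	void *res[NR_HCTX];

	return init_all(res);
}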
diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h
index a75b16b123f7..3a9e6e40558b 100644
--- a/block/blk-mq-sched.h
+++ b/block/blk-mq-sched.h
@@ -19,7 +19,7 @@ bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
19 struct request **merged_request); 19 struct request **merged_request);
20bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio); 20bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio);
21bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq); 21bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq);
22void blk_mq_sched_restart_queues(struct blk_mq_hw_ctx *hctx); 22void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx);
23 23
24void blk_mq_sched_insert_request(struct request *rq, bool at_head, 24void blk_mq_sched_insert_request(struct request *rq, bool at_head,
25 bool run_queue, bool async, bool can_block); 25 bool run_queue, bool async, bool can_block);
@@ -32,8 +32,13 @@ void blk_mq_sched_move_to_dispatch(struct blk_mq_hw_ctx *hctx,
32 struct list_head *rq_list, 32 struct list_head *rq_list,
33 struct request *(*get_rq)(struct blk_mq_hw_ctx *)); 33 struct request *(*get_rq)(struct blk_mq_hw_ctx *));
34 34
35int blk_mq_sched_setup(struct request_queue *q); 35int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e);
36void blk_mq_sched_teardown(struct request_queue *q); 36void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e);
37
38int blk_mq_sched_init_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
39 unsigned int hctx_idx);
40void blk_mq_sched_exit_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
41 unsigned int hctx_idx);
37 42
38int blk_mq_sched_init(struct request_queue *q); 43int blk_mq_sched_init(struct request_queue *q);
39 44
@@ -131,20 +136,6 @@ static inline void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
131 set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state); 136 set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
132} 137}
133 138
134/*
135 * Mark a hardware queue and the request queue it belongs to as needing a
136 * restart.
137 */
138static inline void blk_mq_sched_mark_restart_queue(struct blk_mq_hw_ctx *hctx)
139{
140 struct request_queue *q = hctx->queue;
141
142 if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
143 set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
144 if (!test_bit(QUEUE_FLAG_RESTART, &q->queue_flags))
145 set_bit(QUEUE_FLAG_RESTART, &q->queue_flags);
146}
147
148static inline bool blk_mq_sched_needs_restart(struct blk_mq_hw_ctx *hctx) 139static inline bool blk_mq_sched_needs_restart(struct blk_mq_hw_ctx *hctx)
149{ 140{
150 return test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state); 141 return test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 08a49c69738b..572966f49596 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -321,7 +321,6 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q, int rw,
321 321
322 rq = blk_mq_sched_get_request(q, NULL, rw, &alloc_data); 322 rq = blk_mq_sched_get_request(q, NULL, rw, &alloc_data);
323 323
324 blk_mq_put_ctx(alloc_data.ctx);
325 blk_queue_exit(q); 324 blk_queue_exit(q);
326 325
327 if (!rq) 326 if (!rq)
@@ -349,7 +348,7 @@ void __blk_mq_finish_request(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
349 blk_mq_put_tag(hctx, hctx->tags, ctx, rq->tag); 348 blk_mq_put_tag(hctx, hctx->tags, ctx, rq->tag);
350 if (sched_tag != -1) 349 if (sched_tag != -1)
351 blk_mq_sched_completed_request(hctx, rq); 350 blk_mq_sched_completed_request(hctx, rq);
352 blk_mq_sched_restart_queues(hctx); 351 blk_mq_sched_restart(hctx);
353 blk_queue_exit(q); 352 blk_queue_exit(q);
354} 353}
355 354
@@ -846,12 +845,8 @@ bool blk_mq_get_driver_tag(struct request *rq, struct blk_mq_hw_ctx **hctx,
846 .flags = wait ? 0 : BLK_MQ_REQ_NOWAIT, 845 .flags = wait ? 0 : BLK_MQ_REQ_NOWAIT,
847 }; 846 };
848 847
849 if (rq->tag != -1) { 848 if (rq->tag != -1)
850done: 849 goto done;
851 if (hctx)
852 *hctx = data.hctx;
853 return true;
854 }
855 850
856 if (blk_mq_tag_is_reserved(data.hctx->sched_tags, rq->internal_tag)) 851 if (blk_mq_tag_is_reserved(data.hctx->sched_tags, rq->internal_tag))
857 data.flags |= BLK_MQ_REQ_RESERVED; 852 data.flags |= BLK_MQ_REQ_RESERVED;
@@ -863,10 +858,12 @@ done:
863 atomic_inc(&data.hctx->nr_active); 858 atomic_inc(&data.hctx->nr_active);
864 } 859 }
865 data.hctx->tags->rqs[rq->tag] = rq; 860 data.hctx->tags->rqs[rq->tag] = rq;
866 goto done;
867 } 861 }
868 862
869 return false; 863done:
864 if (hctx)
865 *hctx = data.hctx;
866 return rq->tag != -1;
870} 867}
871 868
872static void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx, 869static void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
@@ -963,13 +960,16 @@ static bool blk_mq_dispatch_wait_add(struct blk_mq_hw_ctx *hctx)
963 return true; 960 return true;
964} 961}
965 962
966bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list) 963bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list)
967{ 964{
968 struct request_queue *q = hctx->queue; 965 struct blk_mq_hw_ctx *hctx;
969 struct request *rq; 966 struct request *rq;
970 LIST_HEAD(driver_list); 967 LIST_HEAD(driver_list);
971 struct list_head *dptr; 968 struct list_head *dptr;
972 int queued, ret = BLK_MQ_RQ_QUEUE_OK; 969 int errors, queued, ret = BLK_MQ_RQ_QUEUE_OK;
970
971 if (list_empty(list))
972 return false;
973 973
974 /* 974 /*
975 * Start off with dptr being NULL, so we start the first request 975 * Start off with dptr being NULL, so we start the first request
@@ -980,8 +980,8 @@ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list)
980 /* 980 /*
981 * Now process all the entries, sending them to the driver. 981 * Now process all the entries, sending them to the driver.
982 */ 982 */
983 queued = 0; 983 errors = queued = 0;
984 while (!list_empty(list)) { 984 do {
985 struct blk_mq_queue_data bd; 985 struct blk_mq_queue_data bd;
986 986
987 rq = list_first_entry(list, struct request, queuelist); 987 rq = list_first_entry(list, struct request, queuelist);
@@ -1037,6 +1037,7 @@ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list)
1037 default: 1037 default:
1038 pr_err("blk-mq: bad return on queue: %d\n", ret); 1038 pr_err("blk-mq: bad return on queue: %d\n", ret);
1039 case BLK_MQ_RQ_QUEUE_ERROR: 1039 case BLK_MQ_RQ_QUEUE_ERROR:
1040 errors++;
1040 rq->errors = -EIO; 1041 rq->errors = -EIO;
1041 blk_mq_end_request(rq, rq->errors); 1042 blk_mq_end_request(rq, rq->errors);
1042 break; 1043 break;
@@ -1051,7 +1052,7 @@ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list)
1051 */ 1052 */
1052 if (!dptr && list->next != list->prev) 1053 if (!dptr && list->next != list->prev)
1053 dptr = &driver_list; 1054 dptr = &driver_list;
1054 } 1055 } while (!list_empty(list));
1055 1056
1056 hctx->dispatched[queued_to_index(queued)]++; 1057 hctx->dispatched[queued_to_index(queued)]++;
1057 1058
@@ -1088,7 +1089,7 @@ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list)
1088 blk_mq_run_hw_queue(hctx, true); 1089 blk_mq_run_hw_queue(hctx, true);
1089 } 1090 }
1090 1091
1091 return queued != 0; 1092 return (queued + errors) != 0;
1092} 1093}
1093 1094
1094static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx) 1095static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
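
blk_mq_dispatch_rq_list() now takes the request queue, bails out early when the list is empty, and counts errored requests as progress, so (queued + errors) != 0 tells the caller whether anything was actually consumed off the list. A toy, array-based version of that control flow (issue() and the int "requests" are stand-ins for driver submission and struct request):

#include <stdbool.h>
#include <stdio.h>

static int issue(int rq)
{
	return (rq % 3 == 0) ? -1 : 0;	/* pretend every third request fails */
}

static bool dispatch(const int *list, int n)
{
	int queued = 0, errors = 0, i = 0;

	if (n == 0)			/* new early-out for an empty list */
		return false;
	do {				/* do/while: one entry is guaranteed */
		if (issue(list[i++]) < 0)
			errors++;	/* a failed request still made progress */
		else
			queued++;
	} while (i < n);
	return (queued + errors) != 0;
}

int main(void)
{
	int reqs[] = { 1, 2, 3, 4 };

	printf("progress: %d\n", dispatch(reqs, 4));
	return 0;
}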
@@ -1134,7 +1135,8 @@ static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
1134 return hctx->next_cpu; 1135 return hctx->next_cpu;
1135} 1136}
1136 1137
1137void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async) 1138static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async,
1139 unsigned long msecs)
1138{ 1140{
1139 if (unlikely(blk_mq_hctx_stopped(hctx) || 1141 if (unlikely(blk_mq_hctx_stopped(hctx) ||
1140 !blk_mq_hw_queue_mapped(hctx))) 1142 !blk_mq_hw_queue_mapped(hctx)))
@@ -1151,7 +1153,24 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
1151 put_cpu(); 1153 put_cpu();
1152 } 1154 }
1153 1155
1154 kblockd_schedule_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work); 1156 if (msecs == 0)
1157 kblockd_schedule_work_on(blk_mq_hctx_next_cpu(hctx),
1158 &hctx->run_work);
1159 else
1160 kblockd_schedule_delayed_work_on(blk_mq_hctx_next_cpu(hctx),
1161 &hctx->delayed_run_work,
1162 msecs_to_jiffies(msecs));
1163}
1164
1165void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
1166{
1167 __blk_mq_delay_run_hw_queue(hctx, true, msecs);
1168}
1169EXPORT_SYMBOL(blk_mq_delay_run_hw_queue);
1170
1171void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
1172{
1173 __blk_mq_delay_run_hw_queue(hctx, async, 0);
1155} 1174}
1156 1175
1157void blk_mq_run_hw_queues(struct request_queue *q, bool async) 1176void blk_mq_run_hw_queues(struct request_queue *q, bool async)
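
blk_mq_delay_run_hw_queue() gives drivers a way to re-run a hardware queue after a delay rather than immediately. A hedged sketch of how a driver's queue_rq handler might use it when a transient resource shortage forces a BUSY return; the mydrv_* names are hypothetical, while the exported call and return code come from this patch:

static int mydrv_queue_rq(struct blk_mq_hw_ctx *hctx,
			  const struct blk_mq_queue_data *bd)
{
	struct mydrv *d = hctx->queue->queuedata;

	if (!mydrv_can_issue(d)) {
		/* Retry in 3ms instead of spinning on the queue. */
		blk_mq_delay_run_hw_queue(hctx, 3);
		return BLK_MQ_RQ_QUEUE_BUSY;
	}
	return mydrv_issue(d, bd->rq);
}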
@@ -1254,6 +1273,15 @@ static void blk_mq_run_work_fn(struct work_struct *work)
1254 __blk_mq_run_hw_queue(hctx); 1273 __blk_mq_run_hw_queue(hctx);
1255} 1274}
1256 1275
1276static void blk_mq_delayed_run_work_fn(struct work_struct *work)
1277{
1278 struct blk_mq_hw_ctx *hctx;
1279
1280 hctx = container_of(work, struct blk_mq_hw_ctx, delayed_run_work.work);
1281
1282 __blk_mq_run_hw_queue(hctx);
1283}
1284
1257static void blk_mq_delay_work_fn(struct work_struct *work) 1285static void blk_mq_delay_work_fn(struct work_struct *work)
1258{ 1286{
1259 struct blk_mq_hw_ctx *hctx; 1287 struct blk_mq_hw_ctx *hctx;
@@ -1923,6 +1951,8 @@ static void blk_mq_exit_hctx(struct request_queue *q,
1923 hctx->fq->flush_rq, hctx_idx, 1951 hctx->fq->flush_rq, hctx_idx,
1924 flush_start_tag + hctx_idx); 1952 flush_start_tag + hctx_idx);
1925 1953
1954 blk_mq_sched_exit_hctx(q, hctx, hctx_idx);
1955
1926 if (set->ops->exit_hctx) 1956 if (set->ops->exit_hctx)
1927 set->ops->exit_hctx(hctx, hctx_idx); 1957 set->ops->exit_hctx(hctx, hctx_idx);
1928 1958
@@ -1959,6 +1989,7 @@ static int blk_mq_init_hctx(struct request_queue *q,
1959 node = hctx->numa_node = set->numa_node; 1989 node = hctx->numa_node = set->numa_node;
1960 1990
1961 INIT_WORK(&hctx->run_work, blk_mq_run_work_fn); 1991 INIT_WORK(&hctx->run_work, blk_mq_run_work_fn);
1992 INIT_DELAYED_WORK(&hctx->delayed_run_work, blk_mq_delayed_run_work_fn);
1962 INIT_DELAYED_WORK(&hctx->delay_work, blk_mq_delay_work_fn); 1993 INIT_DELAYED_WORK(&hctx->delay_work, blk_mq_delay_work_fn);
1963 spin_lock_init(&hctx->lock); 1994 spin_lock_init(&hctx->lock);
1964 INIT_LIST_HEAD(&hctx->dispatch); 1995 INIT_LIST_HEAD(&hctx->dispatch);
@@ -1989,9 +2020,12 @@ static int blk_mq_init_hctx(struct request_queue *q,
1989 set->ops->init_hctx(hctx, set->driver_data, hctx_idx)) 2020 set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
1990 goto free_bitmap; 2021 goto free_bitmap;
1991 2022
2023 if (blk_mq_sched_init_hctx(q, hctx, hctx_idx))
2024 goto exit_hctx;
2025
1992 hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size); 2026 hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size);
1993 if (!hctx->fq) 2027 if (!hctx->fq)
1994 goto exit_hctx; 2028 goto sched_exit_hctx;
1995 2029
1996 if (set->ops->init_request && 2030 if (set->ops->init_request &&
1997 set->ops->init_request(set->driver_data, 2031 set->ops->init_request(set->driver_data,
@@ -2006,6 +2040,8 @@ static int blk_mq_init_hctx(struct request_queue *q,
2006 2040
2007 free_fq: 2041 free_fq:
2008 kfree(hctx->fq); 2042 kfree(hctx->fq);
2043 sched_exit_hctx:
2044 blk_mq_sched_exit_hctx(q, hctx, hctx_idx);
2009 exit_hctx: 2045 exit_hctx:
2010 if (set->ops->exit_hctx) 2046 if (set->ops->exit_hctx)
2011 set->ops->exit_hctx(hctx, hctx_idx); 2047 set->ops->exit_hctx(hctx, hctx_idx);
@@ -2232,8 +2268,6 @@ void blk_mq_release(struct request_queue *q)
2232 struct blk_mq_hw_ctx *hctx; 2268 struct blk_mq_hw_ctx *hctx;
2233 unsigned int i; 2269 unsigned int i;
2234 2270
2235 blk_mq_sched_teardown(q);
2236
2237 /* hctx kobj stays in hctx */ 2271 /* hctx kobj stays in hctx */
2238 queue_for_each_hw_ctx(q, hctx, i) { 2272 queue_for_each_hw_ctx(q, hctx, i) {
2239 if (!hctx) 2273 if (!hctx)
@@ -2564,6 +2598,14 @@ static int blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
2564 return 0; 2598 return 0;
2565} 2599}
2566 2600
2601static int blk_mq_update_queue_map(struct blk_mq_tag_set *set)
2602{
2603 if (set->ops->map_queues)
2604 return set->ops->map_queues(set);
2605 else
2606 return blk_mq_map_queues(set);
2607}
2608
2567/* 2609/*
2568 * Alloc a tag set to be associated with one or more request queues. 2610 * Alloc a tag set to be associated with one or more request queues.
2569 * May fail with EINVAL for various error conditions. May adjust the 2611 * May fail with EINVAL for various error conditions. May adjust the
@@ -2618,10 +2660,7 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
2618 if (!set->mq_map) 2660 if (!set->mq_map)
2619 goto out_free_tags; 2661 goto out_free_tags;
2620 2662
2621 if (set->ops->map_queues) 2663 ret = blk_mq_update_queue_map(set);
2622 ret = set->ops->map_queues(set);
2623 else
2624 ret = blk_mq_map_queues(set);
2625 if (ret) 2664 if (ret)
2626 goto out_free_mq_map; 2665 goto out_free_mq_map;
2627 2666
@@ -2713,6 +2752,7 @@ void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
2713 blk_mq_freeze_queue(q); 2752 blk_mq_freeze_queue(q);
2714 2753
2715 set->nr_hw_queues = nr_hw_queues; 2754 set->nr_hw_queues = nr_hw_queues;
2755 blk_mq_update_queue_map(set);
2716 list_for_each_entry(q, &set->tag_list, tag_set_list) { 2756 list_for_each_entry(q, &set->tag_list, tag_set_list) {
2717 blk_mq_realloc_hw_ctxs(set, q); 2757 blk_mq_realloc_hw_ctxs(set, q);
2718 2758
diff --git a/block/blk-mq.h b/block/blk-mq.h
index b79f9a7d8cf6..660a17e1d033 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -31,7 +31,7 @@ void blk_mq_freeze_queue(struct request_queue *q);
31void blk_mq_free_queue(struct request_queue *q); 31void blk_mq_free_queue(struct request_queue *q);
32int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr); 32int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
33void blk_mq_wake_waiters(struct request_queue *q); 33void blk_mq_wake_waiters(struct request_queue *q);
34bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *, struct list_head *); 34bool blk_mq_dispatch_rq_list(struct request_queue *, struct list_head *);
35void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list); 35void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
36bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx); 36bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx);
37bool blk_mq_get_driver_tag(struct request *rq, struct blk_mq_hw_ctx **hctx, 37bool blk_mq_get_driver_tag(struct request *rq, struct blk_mq_hw_ctx **hctx,
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index c44b321335f3..37f0b3ad635e 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -816,7 +816,7 @@ static void blk_release_queue(struct kobject *kobj)
816 816
817 if (q->elevator) { 817 if (q->elevator) {
818 ioc_clear_queue(q); 818 ioc_clear_queue(q);
819 elevator_exit(q->elevator); 819 elevator_exit(q, q->elevator);
820 } 820 }
821 821
822 blk_exit_rl(&q->root_rl); 822 blk_exit_rl(&q->root_rl);
diff --git a/block/elevator.c b/block/elevator.c
index 01139f549b5b..dbeecf7be719 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -242,26 +242,21 @@ int elevator_init(struct request_queue *q, char *name)
242 } 242 }
243 } 243 }
244 244
245 if (e->uses_mq) { 245 if (e->uses_mq)
246 err = blk_mq_sched_setup(q); 246 err = blk_mq_init_sched(q, e);
247 if (!err) 247 else
248 err = e->ops.mq.init_sched(q, e);
249 } else
250 err = e->ops.sq.elevator_init_fn(q, e); 248 err = e->ops.sq.elevator_init_fn(q, e);
251 if (err) { 249 if (err)
252 if (e->uses_mq)
253 blk_mq_sched_teardown(q);
254 elevator_put(e); 250 elevator_put(e);
255 }
256 return err; 251 return err;
257} 252}
258EXPORT_SYMBOL(elevator_init); 253EXPORT_SYMBOL(elevator_init);
259 254
260void elevator_exit(struct elevator_queue *e) 255void elevator_exit(struct request_queue *q, struct elevator_queue *e)
261{ 256{
262 mutex_lock(&e->sysfs_lock); 257 mutex_lock(&e->sysfs_lock);
263 if (e->uses_mq && e->type->ops.mq.exit_sched) 258 if (e->uses_mq && e->type->ops.mq.exit_sched)
264 e->type->ops.mq.exit_sched(e); 259 blk_mq_exit_sched(q, e);
265 else if (!e->uses_mq && e->type->ops.sq.elevator_exit_fn) 260 else if (!e->uses_mq && e->type->ops.sq.elevator_exit_fn)
266 e->type->ops.sq.elevator_exit_fn(e); 261 e->type->ops.sq.elevator_exit_fn(e);
267 mutex_unlock(&e->sysfs_lock); 262 mutex_unlock(&e->sysfs_lock);
@@ -946,6 +941,45 @@ void elv_unregister(struct elevator_type *e)
946} 941}
947EXPORT_SYMBOL_GPL(elv_unregister); 942EXPORT_SYMBOL_GPL(elv_unregister);
948 943
944static int elevator_switch_mq(struct request_queue *q,
945 struct elevator_type *new_e)
946{
947 int ret;
948
949 blk_mq_freeze_queue(q);
950 blk_mq_quiesce_queue(q);
951
952 if (q->elevator) {
953 if (q->elevator->registered)
954 elv_unregister_queue(q);
955 ioc_clear_queue(q);
956 elevator_exit(q, q->elevator);
957 }
958
959 ret = blk_mq_init_sched(q, new_e);
960 if (ret)
961 goto out;
962
963 if (new_e) {
964 ret = elv_register_queue(q);
965 if (ret) {
966 elevator_exit(q, q->elevator);
967 goto out;
968 }
969 }
970
971 if (new_e)
972 blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name);
973 else
974 blk_add_trace_msg(q, "elv switch: none");
975
976out:
977 blk_mq_unfreeze_queue(q);
978 blk_mq_start_stopped_hw_queues(q, true);
979 return ret;
980
981}
982
949/* 983/*
950 * switch to new_e io scheduler. be careful not to introduce deadlocks - 984 * switch to new_e io scheduler. be careful not to introduce deadlocks -
951 * we don't free the old io scheduler, before we have allocated what we 985 * we don't free the old io scheduler, before we have allocated what we
@@ -958,10 +992,8 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
958 bool old_registered = false; 992 bool old_registered = false;
959 int err; 993 int err;
960 994
961 if (q->mq_ops) { 995 if (q->mq_ops)
962 blk_mq_freeze_queue(q); 996 return elevator_switch_mq(q, new_e);
963 blk_mq_quiesce_queue(q);
964 }
965 997
966 /* 998 /*
967 * Turn on BYPASS and drain all requests w/ elevator private data. 999 * Turn on BYPASS and drain all requests w/ elevator private data.
@@ -973,11 +1005,7 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
973 if (old) { 1005 if (old) {
974 old_registered = old->registered; 1006 old_registered = old->registered;
975 1007
976 if (old->uses_mq) 1008 blk_queue_bypass_start(q);
977 blk_mq_sched_teardown(q);
978
979 if (!q->mq_ops)
980 blk_queue_bypass_start(q);
981 1009
982 /* unregister and clear all auxiliary data of the old elevator */ 1010 /* unregister and clear all auxiliary data of the old elevator */
983 if (old_registered) 1011 if (old_registered)
@@ -987,56 +1015,32 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
987 } 1015 }
988 1016
989 /* allocate, init and register new elevator */ 1017 /* allocate, init and register new elevator */
990 if (new_e) { 1018 err = new_e->ops.sq.elevator_init_fn(q, new_e);
991 if (new_e->uses_mq) { 1019 if (err)
992 err = blk_mq_sched_setup(q); 1020 goto fail_init;
993 if (!err)
994 err = new_e->ops.mq.init_sched(q, new_e);
995 } else
996 err = new_e->ops.sq.elevator_init_fn(q, new_e);
997 if (err)
998 goto fail_init;
999 1021
1000 err = elv_register_queue(q); 1022 err = elv_register_queue(q);
1001 if (err) 1023 if (err)
1002 goto fail_register; 1024 goto fail_register;
1003 } else
1004 q->elevator = NULL;
1005 1025
1006 /* done, kill the old one and finish */ 1026 /* done, kill the old one and finish */
1007 if (old) { 1027 if (old) {
1008 elevator_exit(old); 1028 elevator_exit(q, old);
1009 if (!q->mq_ops) 1029 blk_queue_bypass_end(q);
1010 blk_queue_bypass_end(q);
1011 } 1030 }
1012 1031
1013 if (q->mq_ops) { 1032 blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name);
1014 blk_mq_unfreeze_queue(q);
1015 blk_mq_start_stopped_hw_queues(q, true);
1016 }
1017
1018 if (new_e)
1019 blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name);
1020 else
1021 blk_add_trace_msg(q, "elv switch: none");
1022 1033
1023 return 0; 1034 return 0;
1024 1035
1025fail_register: 1036fail_register:
1026 if (q->mq_ops) 1037 elevator_exit(q, q->elevator);
1027 blk_mq_sched_teardown(q);
1028 elevator_exit(q->elevator);
1029fail_init: 1038fail_init:
1030 /* switch failed, restore and re-register old elevator */ 1039 /* switch failed, restore and re-register old elevator */
1031 if (old) { 1040 if (old) {
1032 q->elevator = old; 1041 q->elevator = old;
1033 elv_register_queue(q); 1042 elv_register_queue(q);
1034 if (!q->mq_ops) 1043 blk_queue_bypass_end(q);
1035 blk_queue_bypass_end(q);
1036 }
1037 if (q->mq_ops) {
1038 blk_mq_unfreeze_queue(q);
1039 blk_mq_start_stopped_hw_queues(q, true);
1040 } 1044 }
1041 1045
1042 return err; 1046 return err;
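
elevator_switch_mq() keeps the whole mq switch inside a freeze/quiesce bracket and unconditionally unfreezes on the way out, success or failure. Reduced to its skeleton (not compilable standalone, and the register/unregister steps are omitted; every call shown is one used in the patch):

static int switch_sched(struct request_queue *q, struct elevator_type *e)
{
	int ret;

	blk_mq_freeze_queue(q);
	blk_mq_quiesce_queue(q);

	ret = blk_mq_init_sched(q, e);	/* e == NULL means "none" */

	blk_mq_unfreeze_queue(q);
	blk_mq_start_stopped_hw_queues(q, true);
	return ret;
}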
diff --git a/crypto/lrw.c b/crypto/lrw.c
index ecd8474018e3..3ea095adafd9 100644
--- a/crypto/lrw.c
+++ b/crypto/lrw.c
@@ -286,8 +286,11 @@ static int init_crypt(struct skcipher_request *req, crypto_completion_t done)
286 286
287 subreq->cryptlen = LRW_BUFFER_SIZE; 287 subreq->cryptlen = LRW_BUFFER_SIZE;
288 if (req->cryptlen > LRW_BUFFER_SIZE) { 288 if (req->cryptlen > LRW_BUFFER_SIZE) {
289 subreq->cryptlen = min(req->cryptlen, (unsigned)PAGE_SIZE); 289 unsigned int n = min(req->cryptlen, (unsigned int)PAGE_SIZE);
290 rctx->ext = kmalloc(subreq->cryptlen, gfp); 290
291 rctx->ext = kmalloc(n, gfp);
292 if (rctx->ext)
293 subreq->cryptlen = n;
291 } 294 }
292 295
293 rctx->src = req->src; 296 rctx->src = req->src;
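
The lrw fix (and the identical xts one below) turns a silent failure into a graceful fallback: the larger request length is committed only after the kmalloc() succeeds; otherwise processing continues in the fixed-size inline buffer. The allocate-then-commit pattern in plain C (the constants are stand-ins for LRW_BUFFER_SIZE and PAGE_SIZE):

#include <stdio.h>
#include <stdlib.h>

#define INLINE_BUF 128
#define CHUNK_MAX  4096

static size_t pick_len(size_t want, char **ext)
{
	size_t len = INLINE_BUF;

	if (want > INLINE_BUF) {
		size_t n = want < CHUNK_MAX ? want : CHUNK_MAX;

		*ext = malloc(n);
		if (*ext)
			len = n;	/* commit only once the buffer exists */
	}
	return len;
}

int main(void)
{
	char *ext = NULL;

	printf("%zu\n", pick_len(1000, &ext));
	free(ext);
	return 0;
}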
diff --git a/crypto/xts.c b/crypto/xts.c
index baeb34dd8582..c976bfac29da 100644
--- a/crypto/xts.c
+++ b/crypto/xts.c
@@ -230,8 +230,11 @@ static int init_crypt(struct skcipher_request *req, crypto_completion_t done)
230 230
231 subreq->cryptlen = XTS_BUFFER_SIZE; 231 subreq->cryptlen = XTS_BUFFER_SIZE;
232 if (req->cryptlen > XTS_BUFFER_SIZE) { 232 if (req->cryptlen > XTS_BUFFER_SIZE) {
233 subreq->cryptlen = min(req->cryptlen, (unsigned)PAGE_SIZE); 233 unsigned int n = min(req->cryptlen, (unsigned int)PAGE_SIZE);
234 rctx->ext = kmalloc(subreq->cryptlen, gfp); 234
235 rctx->ext = kmalloc(n, gfp);
236 if (rctx->ext)
237 subreq->cryptlen = n;
235 } 238 }
236 239
237 rctx->src = req->src; 240 rctx->src = req->src;
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index a391bbc48105..d94f92f88ca1 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -2,7 +2,6 @@
2# Makefile for the Linux ACPI interpreter 2# Makefile for the Linux ACPI interpreter
3# 3#
4 4
5ccflags-y := -Os
6ccflags-$(CONFIG_ACPI_DEBUG) += -DACPI_DEBUG_OUTPUT 5ccflags-$(CONFIG_ACPI_DEBUG) += -DACPI_DEBUG_OUTPUT
7 6
8# 7#
diff --git a/drivers/acpi/acpi_platform.c b/drivers/acpi/acpi_platform.c
index b4c1a6a51da4..03250e1f1103 100644
--- a/drivers/acpi/acpi_platform.c
+++ b/drivers/acpi/acpi_platform.c
@@ -25,9 +25,11 @@
25ACPI_MODULE_NAME("platform"); 25ACPI_MODULE_NAME("platform");
26 26
27static const struct acpi_device_id forbidden_id_list[] = { 27static const struct acpi_device_id forbidden_id_list[] = {
28 {"PNP0000", 0}, /* PIC */ 28 {"PNP0000", 0}, /* PIC */
29 {"PNP0100", 0}, /* Timer */ 29 {"PNP0100", 0}, /* Timer */
30 {"PNP0200", 0}, /* AT DMA Controller */ 30 {"PNP0200", 0}, /* AT DMA Controller */
31 {"ACPI0009", 0}, /* IOxAPIC */
32 {"ACPI000A", 0}, /* IOAPIC */
31 {"", 0}, 33 {"", 0},
32}; 34};
33 35
diff --git a/drivers/acpi/acpica/utresrc.c b/drivers/acpi/acpica/utresrc.c
index c86bae7b1d0f..ff096d9755b9 100644
--- a/drivers/acpi/acpica/utresrc.c
+++ b/drivers/acpi/acpica/utresrc.c
@@ -421,10 +421,8 @@ acpi_ut_walk_aml_resources(struct acpi_walk_state *walk_state,
421 421
422 ACPI_FUNCTION_TRACE(ut_walk_aml_resources); 422 ACPI_FUNCTION_TRACE(ut_walk_aml_resources);
423 423
424 /* 424 /* The absolute minimum resource template is one end_tag descriptor */
425 * The absolute minimum resource template is one end_tag descriptor. 425
426 * However, we will treat a lone end_tag as just a simple buffer.
427 */
428 if (aml_length < sizeof(struct aml_resource_end_tag)) { 426 if (aml_length < sizeof(struct aml_resource_end_tag)) {
429 return_ACPI_STATUS(AE_AML_NO_RESOURCE_END_TAG); 427 return_ACPI_STATUS(AE_AML_NO_RESOURCE_END_TAG);
430 } 428 }
@@ -456,8 +454,9 @@ acpi_ut_walk_aml_resources(struct acpi_walk_state *walk_state,
456 /* Invoke the user function */ 454 /* Invoke the user function */
457 455
458 if (user_function) { 456 if (user_function) {
459 status = user_function(aml, length, offset, 457 status =
460 resource_index, context); 458 user_function(aml, length, offset, resource_index,
459 context);
461 if (ACPI_FAILURE(status)) { 460 if (ACPI_FAILURE(status)) {
462 return_ACPI_STATUS(status); 461 return_ACPI_STATUS(status);
463 } 462 }
@@ -481,12 +480,6 @@ acpi_ut_walk_aml_resources(struct acpi_walk_state *walk_state,
481 *context = aml; 480 *context = aml;
482 } 481 }
483 482
484 /* Check if buffer is defined to be longer than the resource length */
485
486 if (aml_length > (offset + length)) {
487 return_ACPI_STATUS(AE_AML_NO_RESOURCE_END_TAG);
488 }
489
490 /* Normal exit */ 483 /* Normal exit */
491 484
492 return_ACPI_STATUS(AE_OK); 485 return_ACPI_STATUS(AE_OK);
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index b192b42a8351..79b3c9c5a3bc 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -1073,6 +1073,7 @@ static int ghes_remove(struct platform_device *ghes_dev)
1073 if (list_empty(&ghes_sci)) 1073 if (list_empty(&ghes_sci))
1074 unregister_acpi_hed_notifier(&ghes_notifier_sci); 1074 unregister_acpi_hed_notifier(&ghes_notifier_sci);
1075 mutex_unlock(&ghes_list_mutex); 1075 mutex_unlock(&ghes_list_mutex);
1076 synchronize_rcu();
1076 break; 1077 break;
1077 case ACPI_HEST_NOTIFY_NMI: 1078 case ACPI_HEST_NOTIFY_NMI:
1078 ghes_nmi_remove(ghes); 1079 ghes_nmi_remove(ghes);
diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c
index fb19e1cdb641..edc8663b5db3 100644
--- a/drivers/acpi/glue.c
+++ b/drivers/acpi/glue.c
@@ -99,13 +99,13 @@ static int find_child_checks(struct acpi_device *adev, bool check_children)
99 return -ENODEV; 99 return -ENODEV;
100 100
101 /* 101 /*
102 * If the device has a _HID (or _CID) returning a valid ACPI/PNP 102 * If the device has a _HID returning a valid ACPI/PNP device ID, it is
103 * device ID, it is better to make it look less attractive here, so that 103 * better to make it look less attractive here, so that the other device
104 * the other device with the same _ADR value (that may not have a valid 104 * with the same _ADR value (that may not have a valid device ID) can be
105 * device ID) can be matched going forward. [This means a second spec 105 * matched going forward. [This means a second spec violation in a row,
106 * violation in a row, so whatever we do here is best effort anyway.] 106 * so whatever we do here is best effort anyway.]
107 */ 107 */
108 return sta_present && list_empty(&adev->pnp.ids) ? 108 return sta_present && !adev->pnp.type.platform_id ?
109 FIND_CHILD_MAX_SCORE : FIND_CHILD_MIN_SCORE; 109 FIND_CHILD_MAX_SCORE : FIND_CHILD_MIN_SCORE;
110} 110}
111 111
diff --git a/drivers/acpi/ioapic.c b/drivers/acpi/ioapic.c
index 1120dfd625b8..7e4fbf9a53a3 100644
--- a/drivers/acpi/ioapic.c
+++ b/drivers/acpi/ioapic.c
@@ -45,6 +45,12 @@ static acpi_status setup_res(struct acpi_resource *acpi_res, void *data)
45 struct resource *res = data; 45 struct resource *res = data;
46 struct resource_win win; 46 struct resource_win win;
47 47
48 /*
 49 * We might assign this to 'res' later; make sure all pointers are
50 * cleared before the resource is added to the global list
51 */
52 memset(&win, 0, sizeof(win));
53
48 res->flags = 0; 54 res->flags = 0;
49 if (acpi_dev_filter_resource_type(acpi_res, IORESOURCE_MEM)) 55 if (acpi_dev_filter_resource_type(acpi_res, IORESOURCE_MEM))
50 return AE_OK; 56 return AE_OK;
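
The memset() added above exists because 'win' lives on the stack and may later be copied into a resource that ends up on a global list; any field setup_res() does not explicitly fill would otherwise carry stale stack bytes, including dangling pointers. The same defensive shape in miniature (the struct layout is illustrative only):

#include <string.h>

struct win {
	void *ptr;	/* would be garbage if left uninitialized */
	char  pad[32];
};

static void publish(struct win *global_slot)
{
	struct win win;

	memset(&win, 0, sizeof(win));	/* clear before the partial fill */
	/* ... fill in only some fields ... */
	*global_slot = win;		/* safe: no stale stack data */
}

int main(void)
{
	struct win slot;

	publish(&slot);
	return 0;
}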
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index 662036bdc65e..c8ea9d698cd0 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -1617,7 +1617,11 @@ static int cmp_map(const void *m0, const void *m1)
1617 const struct nfit_set_info_map *map0 = m0; 1617 const struct nfit_set_info_map *map0 = m0;
1618 const struct nfit_set_info_map *map1 = m1; 1618 const struct nfit_set_info_map *map1 = m1;
1619 1619
1620 return map0->region_offset - map1->region_offset; 1620 if (map0->region_offset < map1->region_offset)
1621 return -1;
1622 else if (map0->region_offset > map1->region_offset)
1623 return 1;
1624 return 0;
1621} 1625}
1622 1626
1623/* Retrieve the nth entry referencing this spa */ 1627/* Retrieve the nth entry referencing this spa */
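
cmp_map() previously returned map0->region_offset - map1->region_offset, but region_offset is 64-bit while the comparator returns int: the difference gets truncated, so distinct offsets can compare equal and large gaps can flip sign, breaking the sort. Demonstrated in plain C:

#include <stdint.h>
#include <stdio.h>

static int cmp_bad(uint64_t a, uint64_t b)
{
	return a - b;			/* truncated to 32 bits */
}

static int cmp_good(uint64_t a, uint64_t b)
{
	if (a < b)
		return -1;
	else if (a > b)
		return 1;
	return 0;
}

int main(void)
{
	uint64_t a = 0, b = 1ULL << 32;

	/* bad: 0 ("equal"!), good: -1 */
	printf("bad=%d good=%d\n", cmp_bad(a, b), cmp_good(a, b));
	return 0;
}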
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 192691880d55..2433569b02ef 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -1857,15 +1857,20 @@ static void acpi_bus_attach(struct acpi_device *device)
1857 return; 1857 return;
1858 1858
1859 device->flags.match_driver = true; 1859 device->flags.match_driver = true;
1860 if (!ret) { 1860 if (ret > 0) {
1861 ret = device_attach(&device->dev); 1861 acpi_device_set_enumerated(device);
1862 if (ret < 0) 1862 goto ok;
1863 return;
1864
1865 if (!ret && device->pnp.type.platform_id)
1866 acpi_default_enumeration(device);
1867 } 1863 }
1868 1864
1865 ret = device_attach(&device->dev);
1866 if (ret < 0)
1867 return;
1868
1869 if (ret > 0 || !device->pnp.type.platform_id)
1870 acpi_device_set_enumerated(device);
1871 else
1872 acpi_default_enumeration(device);
1873
1869 ok: 1874 ok:
1870 list_for_each_entry(child, &device->children, node) 1875 list_for_each_entry(child, &device->children, node)
1871 acpi_bus_attach(child); 1876 acpi_bus_attach(child);
diff --git a/drivers/ata/pata_atiixp.c b/drivers/ata/pata_atiixp.c
index 6c9aa95a9a05..49d705c9f0f7 100644
--- a/drivers/ata/pata_atiixp.c
+++ b/drivers/ata/pata_atiixp.c
@@ -278,11 +278,6 @@ static int atiixp_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
278 }; 278 };
279 const struct ata_port_info *ppi[] = { &info, &info }; 279 const struct ata_port_info *ppi[] = { &info, &info };
280 280
281 /* SB600/700 don't have secondary port wired */
282 if ((pdev->device == PCI_DEVICE_ID_ATI_IXP600_IDE) ||
283 (pdev->device == PCI_DEVICE_ID_ATI_IXP700_IDE))
284 ppi[1] = &ata_dummy_port_info;
285
286 return ata_pci_bmdma_init_one(pdev, ppi, &atiixp_sht, NULL, 281 return ata_pci_bmdma_init_one(pdev, ppi, &atiixp_sht, NULL,
287 ATA_HOST_PARALLEL_SCAN); 282 ATA_HOST_PARALLEL_SCAN);
288} 283}
diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c
index 0636d84fbefe..f3f538eec7b3 100644
--- a/drivers/ata/sata_via.c
+++ b/drivers/ata/sata_via.c
@@ -644,14 +644,16 @@ static void svia_configure(struct pci_dev *pdev, int board_id,
644 pci_write_config_byte(pdev, SATA_NATIVE_MODE, tmp8); 644 pci_write_config_byte(pdev, SATA_NATIVE_MODE, tmp8);
645 } 645 }
646 646
647 /* enable IRQ on hotplug */ 647 if (board_id == vt6421) {
648 pci_read_config_byte(pdev, SVIA_MISC_3, &tmp8); 648 /* enable IRQ on hotplug */
649 if ((tmp8 & SATA_HOTPLUG) != SATA_HOTPLUG) { 649 pci_read_config_byte(pdev, SVIA_MISC_3, &tmp8);
650 dev_dbg(&pdev->dev, 650 if ((tmp8 & SATA_HOTPLUG) != SATA_HOTPLUG) {
651 "enabling SATA hotplug (0x%x)\n", 651 dev_dbg(&pdev->dev,
652 (int) tmp8); 652 "enabling SATA hotplug (0x%x)\n",
653 tmp8 |= SATA_HOTPLUG; 653 (int) tmp8);
654 pci_write_config_byte(pdev, SVIA_MISC_3, tmp8); 654 tmp8 |= SATA_HOTPLUG;
655 pci_write_config_byte(pdev, SVIA_MISC_3, tmp8);
656 }
655 } 657 }
656 658
657 /* 659 /*
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 7e4287bc19e5..d8a23561b4cb 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -47,6 +47,8 @@ static DEFINE_MUTEX(nbd_index_mutex);
47struct nbd_sock { 47struct nbd_sock {
48 struct socket *sock; 48 struct socket *sock;
49 struct mutex tx_lock; 49 struct mutex tx_lock;
50 struct request *pending;
51 int sent;
50}; 52};
51 53
52#define NBD_TIMEDOUT 0 54#define NBD_TIMEDOUT 0
@@ -124,7 +126,8 @@ static const char *nbdcmd_to_ascii(int cmd)
124 126
125static int nbd_size_clear(struct nbd_device *nbd, struct block_device *bdev) 127static int nbd_size_clear(struct nbd_device *nbd, struct block_device *bdev)
126{ 128{
127 bd_set_size(bdev, 0); 129 if (bdev->bd_openers <= 1)
130 bd_set_size(bdev, 0);
128 set_capacity(nbd->disk, 0); 131 set_capacity(nbd->disk, 0);
129 kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE); 132 kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
130 133
@@ -190,7 +193,7 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
190 193
191 dev_err(nbd_to_dev(nbd), "Connection timed out, shutting down connection\n"); 194 dev_err(nbd_to_dev(nbd), "Connection timed out, shutting down connection\n");
192 set_bit(NBD_TIMEDOUT, &nbd->runtime_flags); 195 set_bit(NBD_TIMEDOUT, &nbd->runtime_flags);
193 req->errors++; 196 req->errors = -EIO;
194 197
195 mutex_lock(&nbd->config_lock); 198 mutex_lock(&nbd->config_lock);
196 sock_shutdown(nbd); 199 sock_shutdown(nbd);
@@ -202,7 +205,7 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
202 * Send or receive packet. 205 * Send or receive packet.
203 */ 206 */
204static int sock_xmit(struct nbd_device *nbd, int index, int send, 207static int sock_xmit(struct nbd_device *nbd, int index, int send,
205 struct iov_iter *iter, int msg_flags) 208 struct iov_iter *iter, int msg_flags, int *sent)
206{ 209{
207 struct socket *sock = nbd->socks[index]->sock; 210 struct socket *sock = nbd->socks[index]->sock;
208 int result; 211 int result;
@@ -237,6 +240,8 @@ static int sock_xmit(struct nbd_device *nbd, int index, int send,
237 result = -EPIPE; /* short read */ 240 result = -EPIPE; /* short read */
238 break; 241 break;
239 } 242 }
243 if (sent)
244 *sent += result;
240 } while (msg_data_left(&msg)); 245 } while (msg_data_left(&msg));
241 246
242 tsk_restore_flags(current, pflags, PF_MEMALLOC); 247 tsk_restore_flags(current, pflags, PF_MEMALLOC);
@@ -248,6 +253,7 @@ static int sock_xmit(struct nbd_device *nbd, int index, int send,
248static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index) 253static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
249{ 254{
250 struct request *req = blk_mq_rq_from_pdu(cmd); 255 struct request *req = blk_mq_rq_from_pdu(cmd);
256 struct nbd_sock *nsock = nbd->socks[index];
251 int result; 257 int result;
252 struct nbd_request request = {.magic = htonl(NBD_REQUEST_MAGIC)}; 258 struct nbd_request request = {.magic = htonl(NBD_REQUEST_MAGIC)};
253 struct kvec iov = {.iov_base = &request, .iov_len = sizeof(request)}; 259 struct kvec iov = {.iov_base = &request, .iov_len = sizeof(request)};
@@ -256,6 +262,7 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
256 struct bio *bio; 262 struct bio *bio;
257 u32 type; 263 u32 type;
258 u32 tag = blk_mq_unique_tag(req); 264 u32 tag = blk_mq_unique_tag(req);
265 int sent = nsock->sent, skip = 0;
259 266
260 iov_iter_kvec(&from, WRITE | ITER_KVEC, &iov, 1, sizeof(request)); 267 iov_iter_kvec(&from, WRITE | ITER_KVEC, &iov, 1, sizeof(request));
261 268
@@ -283,6 +290,17 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
283 return -EIO; 290 return -EIO;
284 } 291 }
285 292
293 /* We did a partial send previously, and we at least sent the whole
294 * request struct, so just go and send the rest of the pages in the
295 * request.
296 */
297 if (sent) {
298 if (sent >= sizeof(request)) {
299 skip = sent - sizeof(request);
300 goto send_pages;
301 }
302 iov_iter_advance(&from, sent);
303 }
286 request.type = htonl(type); 304 request.type = htonl(type);
287 if (type != NBD_CMD_FLUSH) { 305 if (type != NBD_CMD_FLUSH) {
288 request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9); 306 request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
@@ -294,15 +312,27 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
294 cmd, nbdcmd_to_ascii(type), 312 cmd, nbdcmd_to_ascii(type),
295 (unsigned long long)blk_rq_pos(req) << 9, blk_rq_bytes(req)); 313 (unsigned long long)blk_rq_pos(req) << 9, blk_rq_bytes(req));
296 result = sock_xmit(nbd, index, 1, &from, 314 result = sock_xmit(nbd, index, 1, &from,
297 (type == NBD_CMD_WRITE) ? MSG_MORE : 0); 315 (type == NBD_CMD_WRITE) ? MSG_MORE : 0, &sent);
298 if (result <= 0) { 316 if (result <= 0) {
317 if (result == -ERESTARTSYS) {
 318 /* If we haven't sent anything we can just return BUSY,
 319 * however if we have sent something we need to make
 320 * sure we only allow this req to be sent until it is
 321 * completely done.
322 */
323 if (sent) {
324 nsock->pending = req;
325 nsock->sent = sent;
326 }
327 return BLK_MQ_RQ_QUEUE_BUSY;
328 }
299 dev_err_ratelimited(disk_to_dev(nbd->disk), 329 dev_err_ratelimited(disk_to_dev(nbd->disk),
300 "Send control failed (result %d)\n", result); 330 "Send control failed (result %d)\n", result);
301 return -EIO; 331 return -EIO;
302 } 332 }
303 333send_pages:
304 if (type != NBD_CMD_WRITE) 334 if (type != NBD_CMD_WRITE)
305 return 0; 335 goto out;
306 336
307 bio = req->bio; 337 bio = req->bio;
308 while (bio) { 338 while (bio) {
@@ -318,8 +348,25 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
318 cmd, bvec.bv_len); 348 cmd, bvec.bv_len);
319 iov_iter_bvec(&from, ITER_BVEC | WRITE, 349 iov_iter_bvec(&from, ITER_BVEC | WRITE,
320 &bvec, 1, bvec.bv_len); 350 &bvec, 1, bvec.bv_len);
321 result = sock_xmit(nbd, index, 1, &from, flags); 351 if (skip) {
352 if (skip >= iov_iter_count(&from)) {
353 skip -= iov_iter_count(&from);
354 continue;
355 }
356 iov_iter_advance(&from, skip);
357 skip = 0;
358 }
359 result = sock_xmit(nbd, index, 1, &from, flags, &sent);
322 if (result <= 0) { 360 if (result <= 0) {
361 if (result == -ERESTARTSYS) {
 362 /* We've already sent the header, so we
 363 * have no choice but to set pending and
364 * return BUSY.
365 */
366 nsock->pending = req;
367 nsock->sent = sent;
368 return BLK_MQ_RQ_QUEUE_BUSY;
369 }
323 dev_err(disk_to_dev(nbd->disk), 370 dev_err(disk_to_dev(nbd->disk),
324 "Send data failed (result %d)\n", 371 "Send data failed (result %d)\n",
325 result); 372 result);
@@ -336,6 +383,9 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
336 } 383 }
337 bio = next; 384 bio = next;
338 } 385 }
386out:
387 nsock->pending = NULL;
388 nsock->sent = 0;
339 return 0; 389 return 0;
340} 390}
341 391
@@ -353,7 +403,7 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
353 403
354 reply.magic = 0; 404 reply.magic = 0;
355 iov_iter_kvec(&to, READ | ITER_KVEC, &iov, 1, sizeof(reply)); 405 iov_iter_kvec(&to, READ | ITER_KVEC, &iov, 1, sizeof(reply));
356 result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL); 406 result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL);
357 if (result <= 0) { 407 if (result <= 0) {
358 if (!test_bit(NBD_DISCONNECTED, &nbd->runtime_flags) && 408 if (!test_bit(NBD_DISCONNECTED, &nbd->runtime_flags) &&
359 !test_bit(NBD_DISCONNECT_REQUESTED, &nbd->runtime_flags)) 409 !test_bit(NBD_DISCONNECT_REQUESTED, &nbd->runtime_flags))
@@ -383,7 +433,7 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
383 if (ntohl(reply.error)) { 433 if (ntohl(reply.error)) {
384 dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n", 434 dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n",
385 ntohl(reply.error)); 435 ntohl(reply.error));
386 req->errors++; 436 req->errors = -EIO;
387 return cmd; 437 return cmd;
388 } 438 }
389 439
@@ -395,11 +445,11 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
395 rq_for_each_segment(bvec, req, iter) { 445 rq_for_each_segment(bvec, req, iter) {
396 iov_iter_bvec(&to, ITER_BVEC | READ, 446 iov_iter_bvec(&to, ITER_BVEC | READ,
397 &bvec, 1, bvec.bv_len); 447 &bvec, 1, bvec.bv_len);
398 result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL); 448 result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL);
399 if (result <= 0) { 449 if (result <= 0) {
400 dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n", 450 dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n",
401 result); 451 result);
402 req->errors++; 452 req->errors = -EIO;
403 return cmd; 453 return cmd;
404 } 454 }
405 dev_dbg(nbd_to_dev(nbd), "request %p: got %d bytes data\n", 455 dev_dbg(nbd_to_dev(nbd), "request %p: got %d bytes data\n",
@@ -469,7 +519,7 @@ static void nbd_clear_req(struct request *req, void *data, bool reserved)
469 if (!blk_mq_request_started(req)) 519 if (!blk_mq_request_started(req))
470 return; 520 return;
471 cmd = blk_mq_rq_to_pdu(req); 521 cmd = blk_mq_rq_to_pdu(req);
472 req->errors++; 522 req->errors = -EIO;
473 nbd_end_request(cmd); 523 nbd_end_request(cmd);
474} 524}
475 525
@@ -482,22 +532,23 @@ static void nbd_clear_que(struct nbd_device *nbd)
482} 532}
483 533
484 534
485static void nbd_handle_cmd(struct nbd_cmd *cmd, int index) 535static int nbd_handle_cmd(struct nbd_cmd *cmd, int index)
486{ 536{
487 struct request *req = blk_mq_rq_from_pdu(cmd); 537 struct request *req = blk_mq_rq_from_pdu(cmd);
488 struct nbd_device *nbd = cmd->nbd; 538 struct nbd_device *nbd = cmd->nbd;
489 struct nbd_sock *nsock; 539 struct nbd_sock *nsock;
540 int ret;
490 541
491 if (index >= nbd->num_connections) { 542 if (index >= nbd->num_connections) {
492 dev_err_ratelimited(disk_to_dev(nbd->disk), 543 dev_err_ratelimited(disk_to_dev(nbd->disk),
493 "Attempted send on invalid socket\n"); 544 "Attempted send on invalid socket\n");
494 goto error_out; 545 return -EINVAL;
495 } 546 }
496 547
497 if (test_bit(NBD_DISCONNECTED, &nbd->runtime_flags)) { 548 if (test_bit(NBD_DISCONNECTED, &nbd->runtime_flags)) {
498 dev_err_ratelimited(disk_to_dev(nbd->disk), 549 dev_err_ratelimited(disk_to_dev(nbd->disk),
499 "Attempted send on closed socket\n"); 550 "Attempted send on closed socket\n");
500 goto error_out; 551 return -EINVAL;
501 } 552 }
502 553
503 req->errors = 0; 554 req->errors = 0;
@@ -508,29 +559,30 @@ static void nbd_handle_cmd(struct nbd_cmd *cmd, int index)
508 mutex_unlock(&nsock->tx_lock); 559 mutex_unlock(&nsock->tx_lock);
509 dev_err_ratelimited(disk_to_dev(nbd->disk), 560 dev_err_ratelimited(disk_to_dev(nbd->disk),
510 "Attempted send on closed socket\n"); 561 "Attempted send on closed socket\n");
511 goto error_out; 562 return -EINVAL;
512 } 563 }
513 564
 514 if (nbd_send_cmd(nbd, cmd, index) != 0) { 565 /* Handle the case where we have a pending request that was partially
 515 dev_err_ratelimited(disk_to_dev(nbd->disk), 566 * transmitted and _has_ to be serviced first. We need to call requeue
516 "Request send failed\n"); 567 * here so that it gets put _after_ the request that is already on the
517 req->errors++; 568 * dispatch list.
518 nbd_end_request(cmd); 569 */
570 if (unlikely(nsock->pending && nsock->pending != req)) {
571 blk_mq_requeue_request(req, true);
572 ret = 0;
573 goto out;
519 } 574 }
520 575 ret = nbd_send_cmd(nbd, cmd, index);
576out:
521 mutex_unlock(&nsock->tx_lock); 577 mutex_unlock(&nsock->tx_lock);
522 578 return ret;
523 return;
524
525error_out:
526 req->errors++;
527 nbd_end_request(cmd);
528} 579}
529 580
530static int nbd_queue_rq(struct blk_mq_hw_ctx *hctx, 581static int nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
531 const struct blk_mq_queue_data *bd) 582 const struct blk_mq_queue_data *bd)
532{ 583{
533 struct nbd_cmd *cmd = blk_mq_rq_to_pdu(bd->rq); 584 struct nbd_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
585 int ret;
534 586
535 /* 587 /*
536 * Since we look at the bio's to send the request over the network we 588 * Since we look at the bio's to send the request over the network we
@@ -543,10 +595,20 @@ static int nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
543 */ 595 */
544 init_completion(&cmd->send_complete); 596 init_completion(&cmd->send_complete);
545 blk_mq_start_request(bd->rq); 597 blk_mq_start_request(bd->rq);
546 nbd_handle_cmd(cmd, hctx->queue_num); 598
 599 /* We can be called directly from the user space process, which means we
 600 * could have signals pending, so our sendmsg may fail. In this case we
 601 * need to return that we are busy; otherwise error out as
 602 * appropriate.
603 */
604 ret = nbd_handle_cmd(cmd, hctx->queue_num);
605 if (ret < 0)
606 ret = BLK_MQ_RQ_QUEUE_ERROR;
607 if (!ret)
608 ret = BLK_MQ_RQ_QUEUE_OK;
547 complete(&cmd->send_complete); 609 complete(&cmd->send_complete);
548 610
549 return BLK_MQ_RQ_QUEUE_OK; 611 return ret;
550} 612}
551 613
552static int nbd_add_socket(struct nbd_device *nbd, struct block_device *bdev, 614static int nbd_add_socket(struct nbd_device *nbd, struct block_device *bdev,
@@ -581,6 +643,8 @@ static int nbd_add_socket(struct nbd_device *nbd, struct block_device *bdev,
581 643
582 mutex_init(&nsock->tx_lock); 644 mutex_init(&nsock->tx_lock);
583 nsock->sock = sock; 645 nsock->sock = sock;
646 nsock->pending = NULL;
647 nsock->sent = 0;
584 socks[nbd->num_connections++] = nsock; 648 socks[nbd->num_connections++] = nsock;
585 649
586 if (max_part) 650 if (max_part)
@@ -602,6 +666,8 @@ static void nbd_reset(struct nbd_device *nbd)
602 666
603static void nbd_bdev_reset(struct block_device *bdev) 667static void nbd_bdev_reset(struct block_device *bdev)
604{ 668{
669 if (bdev->bd_openers > 1)
670 return;
605 set_device_ro(bdev, false); 671 set_device_ro(bdev, false);
606 bdev->bd_inode->i_size = 0; 672 bdev->bd_inode->i_size = 0;
607 if (max_part > 0) { 673 if (max_part > 0) {
@@ -634,7 +700,7 @@ static void send_disconnects(struct nbd_device *nbd)
634 700
635 for (i = 0; i < nbd->num_connections; i++) { 701 for (i = 0; i < nbd->num_connections; i++) {
636 iov_iter_kvec(&from, WRITE | ITER_KVEC, &iov, 1, sizeof(request)); 702 iov_iter_kvec(&from, WRITE | ITER_KVEC, &iov, 1, sizeof(request));
637 ret = sock_xmit(nbd, i, 1, &from, 0); 703 ret = sock_xmit(nbd, i, 1, &from, 0, NULL);
638 if (ret <= 0) 704 if (ret <= 0)
639 dev_err(disk_to_dev(nbd->disk), 705 dev_err(disk_to_dev(nbd->disk),
640 "Send disconnect failed %d\n", ret); 706 "Send disconnect failed %d\n", ret);
@@ -665,7 +731,8 @@ static int nbd_clear_sock(struct nbd_device *nbd, struct block_device *bdev)
665{ 731{
666 sock_shutdown(nbd); 732 sock_shutdown(nbd);
667 nbd_clear_que(nbd); 733 nbd_clear_que(nbd);
668 kill_bdev(bdev); 734
735 __invalidate_device(bdev, true);
669 nbd_bdev_reset(bdev); 736 nbd_bdev_reset(bdev);
670 /* 737 /*
671 * We want to give the run thread a chance to wait for everybody 738 * We want to give the run thread a chance to wait for everybody
@@ -781,7 +848,10 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
781 nbd_size_set(nbd, bdev, nbd->blksize, arg); 848 nbd_size_set(nbd, bdev, nbd->blksize, arg);
782 return 0; 849 return 0;
783 case NBD_SET_TIMEOUT: 850 case NBD_SET_TIMEOUT:
784 nbd->tag_set.timeout = arg * HZ; 851 if (arg) {
852 nbd->tag_set.timeout = arg * HZ;
853 blk_queue_rq_timeout(nbd->disk->queue, arg * HZ);
854 }
785 return 0; 855 return 0;
786 856
787 case NBD_SET_FLAGS: 857 case NBD_SET_FLAGS:
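
The nbd rework above makes command transmission restartable: sock_xmit() reports how many bytes went out, and when a signal interrupts a send mid-message, the socket records the request and byte count so the retry resumes exactly past the already-sent prefix instead of corrupting the stream with a resend. A userspace analog of that bookkeeping (plain sockets, not the kernel iov_iter machinery):

#include <errno.h>
#include <stddef.h>
#include <sys/socket.h>
#include <sys/types.h>

/*
 * *sent persists across calls, like nsock->sent above: on EINTR the
 * caller parks the message and calls again later with the same state.
 */
ssize_t send_resumable(int fd, const char *buf, size_t len, size_t *sent)
{
	while (*sent < len) {
		ssize_t n = send(fd, buf + *sent, len - *sent, MSG_NOSIGNAL);

		if (n < 0) {
			if (errno == EINTR)
				return -EINTR;	/* retry later, state kept */
			return -errno;
		}
		*sent += (size_t)n;
	}
	*sent = 0;	/* message complete, reset for the next one */
	return (ssize_t)len;
}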
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index dceb5edd1e54..0c09d4256108 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -523,7 +523,7 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
523 523
524 cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO); 524 cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
525 if (size == PAGE_SIZE) { 525 if (size == PAGE_SIZE) {
526 copy_page(mem, cmem); 526 memcpy(mem, cmem, PAGE_SIZE);
527 } else { 527 } else {
528 struct zcomp_strm *zstrm = zcomp_stream_get(zram->comp); 528 struct zcomp_strm *zstrm = zcomp_stream_get(zram->comp);
529 529
@@ -717,7 +717,7 @@ compress_again:
717 717
718 if ((clen == PAGE_SIZE) && !is_partial_io(bvec)) { 718 if ((clen == PAGE_SIZE) && !is_partial_io(bvec)) {
719 src = kmap_atomic(page); 719 src = kmap_atomic(page);
720 copy_page(cmem, src); 720 memcpy(cmem, src, PAGE_SIZE);
721 kunmap_atomic(src); 721 kunmap_atomic(src);
722 } else { 722 } else {
723 memcpy(cmem, src, clen); 723 memcpy(cmem, src, clen);
@@ -928,7 +928,7 @@ static int zram_rw_page(struct block_device *bdev, sector_t sector,
928 } 928 }
929 929
930 index = sector >> SECTORS_PER_PAGE_SHIFT; 930 index = sector >> SECTORS_PER_PAGE_SHIFT;
931 offset = sector & (SECTORS_PER_PAGE - 1) << SECTOR_SHIFT; 931 offset = (sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;
932 932
933 bv.bv_page = page; 933 bv.bv_page = page;
934 bv.bv_len = PAGE_SIZE; 934 bv.bv_len = PAGE_SIZE;
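
Two separate zram fixes above: copy_page() may assume page-aligned source and destination on some architectures, which zsmalloc-mapped objects do not guarantee, so memcpy() is the safe choice; and the offset computation was an operator-precedence bug, since << binds tighter than &, meaning the mask was shifted rather than the masked sector. The precedence half, demonstrated:

#include <stdio.h>

#define SECTORS_PER_PAGE 8	/* 4K page / 512-byte sectors */
#define SECTOR_SHIFT     9

int main(void)
{
	unsigned long sector = 13;
	unsigned long bad  = sector & (SECTORS_PER_PAGE - 1) << SECTOR_SHIFT;
	unsigned long good = (sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;

	printf("bad=%lu good=%lu\n", bad, good);	/* bad=0 good=2560 */
	return 0;
}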
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 6d9cc2d39d22..7e4a9d1296bb 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -60,6 +60,10 @@ static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
60#endif 60#endif
61 61
62#ifdef CONFIG_STRICT_DEVMEM 62#ifdef CONFIG_STRICT_DEVMEM
63static inline int page_is_allowed(unsigned long pfn)
64{
65 return devmem_is_allowed(pfn);
66}
63static inline int range_is_allowed(unsigned long pfn, unsigned long size) 67static inline int range_is_allowed(unsigned long pfn, unsigned long size)
64{ 68{
65 u64 from = ((u64)pfn) << PAGE_SHIFT; 69 u64 from = ((u64)pfn) << PAGE_SHIFT;
@@ -75,6 +79,10 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
75 return 1; 79 return 1;
76} 80}
77#else 81#else
82static inline int page_is_allowed(unsigned long pfn)
83{
84 return 1;
85}
78static inline int range_is_allowed(unsigned long pfn, unsigned long size) 86static inline int range_is_allowed(unsigned long pfn, unsigned long size)
79{ 87{
80 return 1; 88 return 1;
@@ -122,23 +130,31 @@ static ssize_t read_mem(struct file *file, char __user *buf,
122 130
123 while (count > 0) { 131 while (count > 0) {
124 unsigned long remaining; 132 unsigned long remaining;
133 int allowed;
125 134
126 sz = size_inside_page(p, count); 135 sz = size_inside_page(p, count);
127 136
128 if (!range_is_allowed(p >> PAGE_SHIFT, count)) 137 allowed = page_is_allowed(p >> PAGE_SHIFT);
138 if (!allowed)
129 return -EPERM; 139 return -EPERM;
140 if (allowed == 2) {
141 /* Show zeros for restricted memory. */
142 remaining = clear_user(buf, sz);
143 } else {
144 /*
145 * On ia64 if a page has been mapped somewhere as
146 * uncached, then it must also be accessed uncached
147 * by the kernel or data corruption may occur.
148 */
149 ptr = xlate_dev_mem_ptr(p);
150 if (!ptr)
151 return -EFAULT;
130 152
131 /* 153 remaining = copy_to_user(buf, ptr, sz);
132 * On ia64 if a page has been mapped somewhere as uncached, then 154
133 * it must also be accessed uncached by the kernel or data 155 unxlate_dev_mem_ptr(p, ptr);
134 * corruption may occur. 156 }
135 */
136 ptr = xlate_dev_mem_ptr(p);
137 if (!ptr)
138 return -EFAULT;
139 157
140 remaining = copy_to_user(buf, ptr, sz);
141 unxlate_dev_mem_ptr(p, ptr);
142 if (remaining) 158 if (remaining)
143 return -EFAULT; 159 return -EFAULT;
144 160
@@ -181,30 +197,36 @@ static ssize_t write_mem(struct file *file, const char __user *buf,
181#endif 197#endif
182 198
183 while (count > 0) { 199 while (count > 0) {
200 int allowed;
201
184 sz = size_inside_page(p, count); 202 sz = size_inside_page(p, count);
185 203
186 if (!range_is_allowed(p >> PAGE_SHIFT, sz)) 204 allowed = page_is_allowed(p >> PAGE_SHIFT);
205 if (!allowed)
187 return -EPERM; 206 return -EPERM;
188 207
189 /* 208 /* Skip actual writing when a page is marked as restricted. */
190 * On ia64 if a page has been mapped somewhere as uncached, then 209 if (allowed == 1) {
191 * it must also be accessed uncached by the kernel or data 210 /*
192 * corruption may occur. 211 * On ia64 if a page has been mapped somewhere as
193 */ 212 * uncached, then it must also be accessed uncached
194 ptr = xlate_dev_mem_ptr(p); 213 * by the kernel or data corruption may occur.
195 if (!ptr) { 214 */
196 if (written) 215 ptr = xlate_dev_mem_ptr(p);
197 break; 216 if (!ptr) {
198 return -EFAULT; 217 if (written)
199 } 218 break;
219 return -EFAULT;
220 }
200 221
201 copied = copy_from_user(ptr, buf, sz); 222 copied = copy_from_user(ptr, buf, sz);
202 unxlate_dev_mem_ptr(p, ptr); 223 unxlate_dev_mem_ptr(p, ptr);
203 if (copied) { 224 if (copied) {
204 written += sz - copied; 225 written += sz - copied;
205 if (written) 226 if (written)
206 break; 227 break;
207 return -EFAULT; 228 return -EFAULT;
229 }
208 } 230 }
209 231
210 buf += sz; 232 buf += sz;
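
page_is_allowed() introduces a tri-state policy for /dev/mem: 0 refuses access, 1 passes it through, and 2 (possible from devmem_is_allowed() on some configurations) means the access is permitted but faked, so reads see zeroes and writes are silently dropped. A compact stand-alone model of the read side (the stub policy function is invented for the sketch):

#include <string.h>

static int page_is_allowed_stub(unsigned long pfn)
{
	return pfn == 0 ? 2 : 1;	/* pretend pfn 0 is restricted */
}

static int read_page(unsigned long pfn, char *dst, const char *src,
		     size_t n)
{
	int allowed = page_is_allowed_stub(pfn);

	if (!allowed)
		return -1;		/* -EPERM in the real code */
	if (allowed == 2)
		memset(dst, 0, n);	/* restricted: show zeroes */
	else
		memcpy(dst, src, n);	/* normal mapping */
	return 0;
}

int main(void)
{
	char dst[8];
	const char src[8] = "secret!";

	return read_page(0, dst, src, sizeof(dst));
}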
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index e9b7e0b3cabe..87fe111d0be6 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -2202,14 +2202,16 @@ static int virtcons_freeze(struct virtio_device *vdev)
2202 2202
2203 vdev->config->reset(vdev); 2203 vdev->config->reset(vdev);
2204 2204
2205 virtqueue_disable_cb(portdev->c_ivq); 2205 if (use_multiport(portdev))
2206 virtqueue_disable_cb(portdev->c_ivq);
2206 cancel_work_sync(&portdev->control_work); 2207 cancel_work_sync(&portdev->control_work);
2207 cancel_work_sync(&portdev->config_work); 2208 cancel_work_sync(&portdev->config_work);
2208 /* 2209 /*
2209 * Once more: if control_work_handler() was running, it would 2210 * Once more: if control_work_handler() was running, it would
2210 * enable the cb as the last step. 2211 * enable the cb as the last step.
2211 */ 2212 */
2212 virtqueue_disable_cb(portdev->c_ivq); 2213 if (use_multiport(portdev))
2214 virtqueue_disable_cb(portdev->c_ivq);
2213 remove_controlq_data(portdev); 2215 remove_controlq_data(portdev);
2214 2216
2215 list_for_each_entry(port, &portdev->ports, list) { 2217 list_for_each_entry(port, &portdev->ports, list) {
diff --git a/drivers/clocksource/clkevt-probe.c b/drivers/clocksource/clkevt-probe.c
index 8c30fec86094..eb89b502acbd 100644
--- a/drivers/clocksource/clkevt-probe.c
+++ b/drivers/clocksource/clkevt-probe.c
@@ -17,7 +17,7 @@
17 17
18#include <linux/init.h> 18#include <linux/init.h>
19#include <linux/of.h> 19#include <linux/of.h>
20#include <linux/clockchip.h> 20#include <linux/clockchips.h>
21 21
22extern struct of_device_id __clkevt_of_table[]; 22extern struct of_device_id __clkevt_of_table[];
23 23
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 5dbdd261aa73..0e3f6496524d 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -918,11 +918,19 @@ static struct kobj_type ktype_cpufreq = {
918 .release = cpufreq_sysfs_release, 918 .release = cpufreq_sysfs_release,
919}; 919};
920 920
921static int add_cpu_dev_symlink(struct cpufreq_policy *policy, 921static void add_cpu_dev_symlink(struct cpufreq_policy *policy, unsigned int cpu)
922 struct device *dev)
923{ 922{
923 struct device *dev = get_cpu_device(cpu);
924
925 if (!dev)
926 return;
927
928 if (cpumask_test_and_set_cpu(cpu, policy->real_cpus))
929 return;
930
924 dev_dbg(dev, "%s: Adding symlink\n", __func__); 931 dev_dbg(dev, "%s: Adding symlink\n", __func__);
925 return sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq"); 932 if (sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq"))
933 dev_err(dev, "cpufreq symlink creation failed\n");
926} 934}
927 935
928static void remove_cpu_dev_symlink(struct cpufreq_policy *policy, 936static void remove_cpu_dev_symlink(struct cpufreq_policy *policy,
@@ -1180,10 +1188,10 @@ static int cpufreq_online(unsigned int cpu)
1180 policy->user_policy.min = policy->min; 1188 policy->user_policy.min = policy->min;
1181 policy->user_policy.max = policy->max; 1189 policy->user_policy.max = policy->max;
1182 1190
1183 write_lock_irqsave(&cpufreq_driver_lock, flags); 1191 for_each_cpu(j, policy->related_cpus) {
1184 for_each_cpu(j, policy->related_cpus)
1185 per_cpu(cpufreq_cpu_data, j) = policy; 1192 per_cpu(cpufreq_cpu_data, j) = policy;
1186 write_unlock_irqrestore(&cpufreq_driver_lock, flags); 1193 add_cpu_dev_symlink(policy, j);
1194 }
1187 } else { 1195 } else {
1188 policy->min = policy->user_policy.min; 1196 policy->min = policy->user_policy.min;
1189 policy->max = policy->user_policy.max; 1197 policy->max = policy->user_policy.max;
@@ -1275,13 +1283,15 @@ out_exit_policy:
1275 1283
1276 if (cpufreq_driver->exit) 1284 if (cpufreq_driver->exit)
1277 cpufreq_driver->exit(policy); 1285 cpufreq_driver->exit(policy);
1286
1287 for_each_cpu(j, policy->real_cpus)
1288 remove_cpu_dev_symlink(policy, get_cpu_device(j));
1289
1278out_free_policy: 1290out_free_policy:
1279 cpufreq_policy_free(policy); 1291 cpufreq_policy_free(policy);
1280 return ret; 1292 return ret;
1281} 1293}
1282 1294
1283static int cpufreq_offline(unsigned int cpu);
1284
1285/** 1295/**
1286 * cpufreq_add_dev - the cpufreq interface for a CPU device. 1296 * cpufreq_add_dev - the cpufreq interface for a CPU device.
1287 * @dev: CPU device. 1297 * @dev: CPU device.
@@ -1303,16 +1313,10 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
1303 1313
1304 /* Create sysfs link on CPU registration */ 1314 /* Create sysfs link on CPU registration */
1305 policy = per_cpu(cpufreq_cpu_data, cpu); 1315 policy = per_cpu(cpufreq_cpu_data, cpu);
1306 if (!policy || cpumask_test_and_set_cpu(cpu, policy->real_cpus)) 1316 if (policy)
1307 return 0; 1317 add_cpu_dev_symlink(policy, cpu);
1308
1309 ret = add_cpu_dev_symlink(policy, dev);
1310 if (ret) {
1311 cpumask_clear_cpu(cpu, policy->real_cpus);
1312 cpufreq_offline(cpu);
1313 }
1314 1318
1315 return ret; 1319 return 0;
1316} 1320}
1317 1321
1318static int cpufreq_offline(unsigned int cpu) 1322static int cpufreq_offline(unsigned int cpu)
@@ -2394,6 +2398,20 @@ EXPORT_SYMBOL_GPL(cpufreq_boost_enabled);
2394 *********************************************************************/ 2398 *********************************************************************/
2395static enum cpuhp_state hp_online; 2399static enum cpuhp_state hp_online;
2396 2400
2401static int cpuhp_cpufreq_online(unsigned int cpu)
2402{
2403 cpufreq_online(cpu);
2404
2405 return 0;
2406}
2407
2408static int cpuhp_cpufreq_offline(unsigned int cpu)
2409{
2410 cpufreq_offline(cpu);
2411
2412 return 0;
2413}
2414
2397/** 2415/**
2398 * cpufreq_register_driver - register a CPU Frequency driver 2416 * cpufreq_register_driver - register a CPU Frequency driver
2399 * @driver_data: A struct cpufreq_driver containing the values 2417 * @driver_data: A struct cpufreq_driver containing the values
@@ -2456,8 +2474,8 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
2456 } 2474 }
2457 2475
2458 ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "cpufreq:online", 2476 ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "cpufreq:online",
2459 cpufreq_online, 2477 cpuhp_cpufreq_online,
2460 cpufreq_offline); 2478 cpuhp_cpufreq_offline);
2461 if (ret < 0) 2479 if (ret < 0)
2462 goto err_if_unreg; 2480 goto err_if_unreg;
2463 hp_online = ret; 2481 hp_online = ret;
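The cpuhp_cpufreq_online/offline wrappers exist because a CPU-hotplug
callback's non-zero return value aborts the hotplug operation, and cpufreq
does not want a policy setup failure to veto onlining a CPU. A minimal
sketch of the pattern (hypothetical subsystem callbacks, not the kernel's):

	/* Swallow subsystem errors so they never veto CPU hotplug. */
	static int my_hp_online(unsigned int cpu)
	{
		my_online(cpu);		/* hypothetical, best effort */
		return 0;
	}

	static int my_hp_offline(unsigned int cpu)
	{
		my_offline(cpu);	/* hypothetical */
		return 0;
	}

	/* registered via:
	 * cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "my:online",
	 *			     my_hp_online, my_hp_offline);
	 */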
diff --git a/drivers/cpuidle/cpuidle-powernv.c b/drivers/cpuidle/cpuidle-powernv.c
index 370593006f5f..cda8f62d555b 100644
--- a/drivers/cpuidle/cpuidle-powernv.c
+++ b/drivers/cpuidle/cpuidle-powernv.c
@@ -175,6 +175,24 @@ static int powernv_cpuidle_driver_init(void)
175 drv->state_count += 1; 175 drv->state_count += 1;
176 } 176 }
177 177
178 /*
179 * On the PowerNV platform cpu_present may be less than cpu_possible in
180 * cases when firmware detects the CPU, but it is not available to the
181 OS. If CONFIG_HOTPLUG_CPU=n, then such CPUs are not hotpluggable at
182 * run time and hence cpu_devices are not created for those CPUs by the
183 * generic topology_init().
184 *
185 * drv->cpumask defaults to cpu_possible_mask in
186 * __cpuidle_driver_init(). This breaks cpuidle on PowerNV where
187 * cpu_devices are not created for CPUs in cpu_possible_mask that
188 * cannot be hot-added later at run time.
189 *
190 * Trying cpuidle_register_device() on a CPU without a cpu_device is
191 * incorrect, so pass a correct CPU mask to the generic cpuidle driver.
192 */
193
194 drv->cpumask = (struct cpumask *)cpu_present_mask;
195
178 return 0; 196 return 0;
179} 197}
180 198
diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c
index 32100c4851dd..49cbdcba7883 100644
--- a/drivers/crypto/caam/caampkc.c
+++ b/drivers/crypto/caam/caampkc.c
@@ -506,7 +506,7 @@ static int caam_rsa_init_tfm(struct crypto_akcipher *tfm)
506 ctx->dev = caam_jr_alloc(); 506 ctx->dev = caam_jr_alloc();
507 507
508 if (IS_ERR(ctx->dev)) { 508 if (IS_ERR(ctx->dev)) {
509 dev_err(ctx->dev, "Job Ring Device allocation for transform failed\n"); 509 pr_err("Job Ring Device allocation for transform failed\n");
510 return PTR_ERR(ctx->dev); 510 return PTR_ERR(ctx->dev);
511 } 511 }
512 512
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index fef39f9f41ee..5d7f73d60515 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -281,7 +281,8 @@ static int deinstantiate_rng(struct device *ctrldev, int state_handle_mask)
281 /* Try to run it through DECO0 */ 281 /* Try to run it through DECO0 */
282 ret = run_descriptor_deco0(ctrldev, desc, &status); 282 ret = run_descriptor_deco0(ctrldev, desc, &status);
283 283
284 if (ret || status) { 284 if (ret ||
285 (status && status != JRSTA_SSRC_JUMP_HALT_CC)) {
285 dev_err(ctrldev, 286 dev_err(ctrldev,
286 "Failed to deinstantiate RNG4 SH%d\n", 287 "Failed to deinstantiate RNG4 SH%d\n",
287 sh_idx); 288 sh_idx);
@@ -301,15 +302,13 @@ static int caam_remove(struct platform_device *pdev)
301 struct device *ctrldev; 302 struct device *ctrldev;
302 struct caam_drv_private *ctrlpriv; 303 struct caam_drv_private *ctrlpriv;
303 struct caam_ctrl __iomem *ctrl; 304 struct caam_ctrl __iomem *ctrl;
304 int ring;
305 305
306 ctrldev = &pdev->dev; 306 ctrldev = &pdev->dev;
307 ctrlpriv = dev_get_drvdata(ctrldev); 307 ctrlpriv = dev_get_drvdata(ctrldev);
308 ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl; 308 ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl;
309 309
310 /* Remove platform devices for JobRs */ 310 /* Remove platform devices under the crypto node */
311 for (ring = 0; ring < ctrlpriv->total_jobrs; ring++) 311 of_platform_depopulate(ctrldev);
312 of_device_unregister(ctrlpriv->jrpdev[ring]);
313 312
314 /* De-initialize RNG state handles initialized by this driver. */ 313 /* De-initialize RNG state handles initialized by this driver. */
315 if (ctrlpriv->rng4_sh_init) 314 if (ctrlpriv->rng4_sh_init)
@@ -418,10 +417,21 @@ DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u32_ro, caam_debugfs_u32_get, NULL, "%llu\n");
418DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u64_ro, caam_debugfs_u64_get, NULL, "%llu\n"); 417DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u64_ro, caam_debugfs_u64_get, NULL, "%llu\n");
419#endif 418#endif
420 419
420static const struct of_device_id caam_match[] = {
421 {
422 .compatible = "fsl,sec-v4.0",
423 },
424 {
425 .compatible = "fsl,sec4.0",
426 },
427 {},
428};
429MODULE_DEVICE_TABLE(of, caam_match);
430
421/* Probe routine for CAAM top (controller) level */ 431/* Probe routine for CAAM top (controller) level */
422static int caam_probe(struct platform_device *pdev) 432static int caam_probe(struct platform_device *pdev)
423{ 433{
424 int ret, ring, ridx, rspec, gen_sk, ent_delay = RTSDCTL_ENT_DLY_MIN; 434 int ret, ring, gen_sk, ent_delay = RTSDCTL_ENT_DLY_MIN;
425 u64 caam_id; 435 u64 caam_id;
426 struct device *dev; 436 struct device *dev;
427 struct device_node *nprop, *np; 437 struct device_node *nprop, *np;
@@ -597,47 +607,24 @@ static int caam_probe(struct platform_device *pdev)
597 goto iounmap_ctrl; 607 goto iounmap_ctrl;
598 } 608 }
599 609
600 /* 610 ret = of_platform_populate(nprop, caam_match, NULL, dev);
601 * Detect and enable JobRs 611 if (ret) {
602 * First, find out how many ring spec'ed, allocate references 612 dev_err(dev, "JR platform devices creation error\n");
603 * for all, then go probe each one.
604 */
605 rspec = 0;
606 for_each_available_child_of_node(nprop, np)
607 if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
608 of_device_is_compatible(np, "fsl,sec4.0-job-ring"))
609 rspec++;
610
611 ctrlpriv->jrpdev = devm_kcalloc(&pdev->dev, rspec,
612 sizeof(*ctrlpriv->jrpdev), GFP_KERNEL);
613 if (ctrlpriv->jrpdev == NULL) {
614 ret = -ENOMEM;
615 goto iounmap_ctrl; 613 goto iounmap_ctrl;
616 } 614 }
617 615
618 ring = 0; 616 ring = 0;
619 ridx = 0;
620 ctrlpriv->total_jobrs = 0;
621 for_each_available_child_of_node(nprop, np) 617 for_each_available_child_of_node(nprop, np)
622 if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") || 618 if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
623 of_device_is_compatible(np, "fsl,sec4.0-job-ring")) { 619 of_device_is_compatible(np, "fsl,sec4.0-job-ring")) {
624 ctrlpriv->jrpdev[ring] =
625 of_platform_device_create(np, NULL, dev);
626 if (!ctrlpriv->jrpdev[ring]) {
627 pr_warn("JR physical index %d: Platform device creation error\n",
628 ridx);
629 ridx++;
630 continue;
631 }
632 ctrlpriv->jr[ring] = (struct caam_job_ring __iomem __force *) 620 ctrlpriv->jr[ring] = (struct caam_job_ring __iomem __force *)
633 ((__force uint8_t *)ctrl + 621 ((__force uint8_t *)ctrl +
634 (ridx + JR_BLOCK_NUMBER) * 622 (ring + JR_BLOCK_NUMBER) *
635 BLOCK_OFFSET 623 BLOCK_OFFSET
636 ); 624 );
637 ctrlpriv->total_jobrs++; 625 ctrlpriv->total_jobrs++;
638 ring++; 626 ring++;
639 ridx++; 627 }
640 }
641 628
642 /* Check to see if QI present. If so, enable */ 629 /* Check to see if QI present. If so, enable */
643 ctrlpriv->qi_present = 630 ctrlpriv->qi_present =
@@ -847,17 +834,6 @@ disable_caam_ipg:
847 return ret; 834 return ret;
848} 835}
849 836
850static struct of_device_id caam_match[] = {
851 {
852 .compatible = "fsl,sec-v4.0",
853 },
854 {
855 .compatible = "fsl,sec4.0",
856 },
857 {},
858};
859MODULE_DEVICE_TABLE(of, caam_match);
860
861static struct platform_driver caam_driver = { 837static struct platform_driver caam_driver = {
862 .driver = { 838 .driver = {
863 .name = "caam", 839 .name = "caam",
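The ctrl.c conversion above replaces per-node of_platform_device_create()
calls with of_platform_populate()/of_platform_depopulate(), which create and
tear down platform devices for a node's children as a matched set. A hedged
sketch of the pairing, with a placeholder compatible string:

	/* Sketch only: let the OF core manage child platform devices. */
	static const struct of_device_id my_match[] = {
		{ .compatible = "vendor,block" },	/* placeholder */
		{ /* sentinel */ },
	};

	static int my_probe(struct platform_device *pdev)
	{
		/* create platform devices for the node's children */
		return of_platform_populate(pdev->dev.of_node, my_match,
					    NULL, &pdev->dev);
	}

	static int my_remove(struct platform_device *pdev)
	{
		/* remove everything created in probe */
		of_platform_depopulate(&pdev->dev);
		return 0;
	}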
diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h
index e2bcacc1a921..dbed8baeebe5 100644
--- a/drivers/crypto/caam/intern.h
+++ b/drivers/crypto/caam/intern.h
@@ -66,7 +66,6 @@ struct caam_drv_private_jr {
66struct caam_drv_private { 66struct caam_drv_private {
67 67
68 struct device *dev; 68 struct device *dev;
69 struct platform_device **jrpdev; /* Alloc'ed array per sub-device */
70 struct platform_device *pdev; 69 struct platform_device *pdev;
71 70
72 /* Physical-presence section */ 71 /* Physical-presence section */
diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c
index 41cc853f8569..fc08b4ed69d9 100644
--- a/drivers/crypto/ccp/ccp-dev-v5.c
+++ b/drivers/crypto/ccp/ccp-dev-v5.c
@@ -1015,6 +1015,7 @@ const struct ccp_vdata ccpv5a = {
1015 1015
1016const struct ccp_vdata ccpv5b = { 1016const struct ccp_vdata ccpv5b = {
1017 .version = CCP_VERSION(5, 0), 1017 .version = CCP_VERSION(5, 0),
1018 .dma_chan_attr = DMA_PRIVATE,
1018 .setup = ccp5other_config, 1019 .setup = ccp5other_config,
1019 .perform = &ccp5_actions, 1020 .perform = &ccp5_actions,
1020 .bar = 2, 1021 .bar = 2,
diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h
index 2b5c01fade05..aa36f3f81860 100644
--- a/drivers/crypto/ccp/ccp-dev.h
+++ b/drivers/crypto/ccp/ccp-dev.h
@@ -179,6 +179,10 @@
179 179
180/* ------------------------ General CCP Defines ------------------------ */ 180/* ------------------------ General CCP Defines ------------------------ */
181 181
182#define CCP_DMA_DFLT 0x0
183#define CCP_DMA_PRIV 0x1
184#define CCP_DMA_PUB 0x2
185
182#define CCP_DMAPOOL_MAX_SIZE 64 186#define CCP_DMAPOOL_MAX_SIZE 64
183#define CCP_DMAPOOL_ALIGN BIT(5) 187#define CCP_DMAPOOL_ALIGN BIT(5)
184 188
@@ -636,6 +640,7 @@ struct ccp_actions {
636/* Structure to hold CCP version-specific values */ 640/* Structure to hold CCP version-specific values */
637struct ccp_vdata { 641struct ccp_vdata {
638 const unsigned int version; 642 const unsigned int version;
643 const unsigned int dma_chan_attr;
639 void (*setup)(struct ccp_device *); 644 void (*setup)(struct ccp_device *);
640 const struct ccp_actions *perform; 645 const struct ccp_actions *perform;
641 const unsigned int bar; 646 const unsigned int bar;
diff --git a/drivers/crypto/ccp/ccp-dmaengine.c b/drivers/crypto/ccp/ccp-dmaengine.c
index 8d0eeb46d4a2..e00be01fbf5a 100644
--- a/drivers/crypto/ccp/ccp-dmaengine.c
+++ b/drivers/crypto/ccp/ccp-dmaengine.c
@@ -10,6 +10,7 @@
10 * published by the Free Software Foundation. 10 * published by the Free Software Foundation.
11 */ 11 */
12 12
13#include <linux/module.h>
13#include <linux/kernel.h> 14#include <linux/kernel.h>
14#include <linux/dmaengine.h> 15#include <linux/dmaengine.h>
15#include <linux/spinlock.h> 16#include <linux/spinlock.h>
@@ -25,6 +26,37 @@
25 (mask == 0) ? 64 : fls64(mask); \ 26 (mask == 0) ? 64 : fls64(mask); \
26}) 27})
27 28
29/* The CCP as a DMA provider can be configured for public or private
30 * channels. Default is specified in the vdata for the device (PCI ID).
31 * This module parameter will override for all channels on all devices:
32 * dma_chan_attr = 0x2 to force all channels public
33 * = 0x1 to force all channels private
34 * = 0x0 to defer to the vdata setting
35 * = any other value: warning, revert to 0x0
36 */
37static unsigned int dma_chan_attr = CCP_DMA_DFLT;
38module_param(dma_chan_attr, uint, 0444);
39MODULE_PARM_DESC(dma_chan_attr, "Set DMA channel visibility: 0 (default) = device defaults, 1 = make private, 2 = make public");
40
41unsigned int ccp_get_dma_chan_attr(struct ccp_device *ccp)
42{
43 switch (dma_chan_attr) {
44 case CCP_DMA_DFLT:
45 return ccp->vdata->dma_chan_attr;
46
47 case CCP_DMA_PRIV:
48 return DMA_PRIVATE;
49
50 case CCP_DMA_PUB:
51 return 0;
52
53 default:
54 dev_info_once(ccp->dev, "Invalid value for dma_chan_attr: %d\n",
55 dma_chan_attr);
56 return ccp->vdata->dma_chan_attr;
57 }
58}
59
28static void ccp_free_cmd_resources(struct ccp_device *ccp, 60static void ccp_free_cmd_resources(struct ccp_device *ccp,
29 struct list_head *list) 61 struct list_head *list)
30{ 62{
@@ -675,6 +707,15 @@ int ccp_dmaengine_register(struct ccp_device *ccp)
675 dma_cap_set(DMA_SG, dma_dev->cap_mask); 707 dma_cap_set(DMA_SG, dma_dev->cap_mask);
676 dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask); 708 dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask);
677 709
710 /* The DMA channels for this device can be set to public or private,
711 * and overridden by the module parameter dma_chan_attr.
712 * Default: according to the value in vdata (dma_chan_attr=0)
713 * dma_chan_attr=0x1: all channels private (override vdata)
714 * dma_chan_attr=0x2: all channels public (override vdata)
715 */
716 if (ccp_get_dma_chan_attr(ccp) == DMA_PRIVATE)
717 dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);
718
678 INIT_LIST_HEAD(&dma_dev->channels); 719 INIT_LIST_HEAD(&dma_dev->channels);
679 for (i = 0; i < ccp->cmd_q_count; i++) { 720 for (i = 0; i < ccp->cmd_q_count; i++) {
680 chan = ccp->ccp_dma_chan + i; 721 chan = ccp->ccp_dma_chan + i;
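module_param() above is the standard load-time policy knob: it registers the
variable, the third argument gives the sysfs mode (0444 makes it read-only
under /sys/module/<name>/parameters/), and MODULE_PARM_DESC() documents it
for modinfo. A skeletal example (hypothetical module, not ccp itself):

	#include <linux/kernel.h>
	#include <linux/init.h>
	#include <linux/module.h>

	/* hypothetical knob: 0 = device default, 1 = private, 2 = public */
	static unsigned int chan_policy;
	module_param(chan_policy, uint, 0444);
	MODULE_PARM_DESC(chan_policy,
			 "Channel visibility: 0=default, 1=private, 2=public");

	static int __init demo_init(void)
	{
		pr_info("chan_policy=%u\n", chan_policy);
		return 0;
	}
	module_init(demo_init);

	static void __exit demo_exit(void) { }
	module_exit(demo_exit);

	MODULE_LICENSE("GPL");

Loaded with e.g. "modprobe demo chan_policy=1". Setting DMA_PRIVATE in the
capability mask, as the hunk above does, keeps the channels out of the
public pool used for opportunistic allocation; they must be requested
explicitly.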
diff --git a/drivers/dax/Kconfig b/drivers/dax/Kconfig
index 3e2ab3b14eea..9e95bf94eb13 100644
--- a/drivers/dax/Kconfig
+++ b/drivers/dax/Kconfig
@@ -2,6 +2,7 @@ menuconfig DEV_DAX
2 tristate "DAX: direct access to differentiated memory" 2 tristate "DAX: direct access to differentiated memory"
3 default m if NVDIMM_DAX 3 default m if NVDIMM_DAX
4 depends on TRANSPARENT_HUGEPAGE 4 depends on TRANSPARENT_HUGEPAGE
5 select SRCU
5 help 6 help
6 Support raw access to differentiated (persistence, bandwidth, 7 Support raw access to differentiated (persistence, bandwidth,
7 latency...) memory via an mmap(2) capable character 8 latency...) memory via an mmap(2) capable character
diff --git a/drivers/dax/dax.c b/drivers/dax/dax.c
index 80c6db279ae1..806f180c80d8 100644
--- a/drivers/dax/dax.c
+++ b/drivers/dax/dax.c
@@ -25,6 +25,7 @@
25#include "dax.h" 25#include "dax.h"
26 26
27static dev_t dax_devt; 27static dev_t dax_devt;
28DEFINE_STATIC_SRCU(dax_srcu);
28static struct class *dax_class; 29static struct class *dax_class;
29static DEFINE_IDA(dax_minor_ida); 30static DEFINE_IDA(dax_minor_ida);
30static int nr_dax = CONFIG_NR_DEV_DAX; 31static int nr_dax = CONFIG_NR_DEV_DAX;
@@ -60,7 +61,7 @@ struct dax_region {
60 * @region - parent region 61 * @region - parent region
61 * @dev - device backing the character device 62 * @dev - device backing the character device
62 * @cdev - core chardev data 63 * @cdev - core chardev data
63 * @alive - !alive + rcu grace period == no new mappings can be established 64 * @alive - !alive + srcu grace period == no new mappings can be established
64 * @id - child id in the region 65 * @id - child id in the region
65 * @num_resources - number of physical address extents in this device 66 * @num_resources - number of physical address extents in this device
66 * @res - array of physical address ranges 67 * @res - array of physical address ranges
@@ -569,7 +570,7 @@ static int __dax_dev_pud_fault(struct dax_dev *dax_dev, struct vm_fault *vmf)
569static int dax_dev_huge_fault(struct vm_fault *vmf, 570static int dax_dev_huge_fault(struct vm_fault *vmf,
570 enum page_entry_size pe_size) 571 enum page_entry_size pe_size)
571{ 572{
572 int rc; 573 int rc, id;
573 struct file *filp = vmf->vma->vm_file; 574 struct file *filp = vmf->vma->vm_file;
574 struct dax_dev *dax_dev = filp->private_data; 575 struct dax_dev *dax_dev = filp->private_data;
575 576
@@ -578,7 +579,7 @@ static int dax_dev_huge_fault(struct vm_fault *vmf,
578 ? "write" : "read", 579 ? "write" : "read",
579 vmf->vma->vm_start, vmf->vma->vm_end); 580 vmf->vma->vm_start, vmf->vma->vm_end);
580 581
581 rcu_read_lock(); 582 id = srcu_read_lock(&dax_srcu);
582 switch (pe_size) { 583 switch (pe_size) {
583 case PE_SIZE_PTE: 584 case PE_SIZE_PTE:
584 rc = __dax_dev_pte_fault(dax_dev, vmf); 585 rc = __dax_dev_pte_fault(dax_dev, vmf);
@@ -592,7 +593,7 @@ static int dax_dev_huge_fault(struct vm_fault *vmf,
592 default: 593 default:
593 return VM_FAULT_FALLBACK; 594 return VM_FAULT_FALLBACK;
594 } 595 }
595 rcu_read_unlock(); 596 srcu_read_unlock(&dax_srcu, id);
596 597
597 return rc; 598 return rc;
598} 599}
@@ -713,11 +714,11 @@ static void unregister_dax_dev(void *dev)
713 * Note, rcu is not protecting the liveness of dax_dev, rcu is 714 * Note, rcu is not protecting the liveness of dax_dev, rcu is
714 * ensuring that any fault handlers that might have seen 715 * ensuring that any fault handlers that might have seen
715 * dax_dev->alive == true, have completed. Any fault handlers 716 * dax_dev->alive == true, have completed. Any fault handlers
716 * that start after synchronize_rcu() has started will abort 717 * that start after synchronize_srcu() has started will abort
717 * upon seeing dax_dev->alive == false. 718 * upon seeing dax_dev->alive == false.
718 */ 719 */
719 dax_dev->alive = false; 720 dax_dev->alive = false;
720 synchronize_rcu(); 721 synchronize_srcu(&dax_srcu);
721 unmap_mapping_range(dax_dev->inode->i_mapping, 0, 0, 1); 722 unmap_mapping_range(dax_dev->inode->i_mapping, 0, 0, 1);
722 cdev_del(cdev); 723 cdev_del(cdev);
723 device_unregister(dev); 724 device_unregister(dev);
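The dax switch from RCU to SRCU above is needed because the fault path can
sleep (e.g. in page-table allocation), which is illegal inside
rcu_read_lock(). SRCU read sections may sleep, at the cost of carrying a
srcu_struct and a read-side cookie. A condensed sketch of the lifecycle
pattern (illustrative names):

	DEFINE_STATIC_SRCU(my_srcu);
	static bool alive = true;

	static int my_fault(void)
	{
		int id, rc = -ENXIO;

		id = srcu_read_lock(&my_srcu);	/* sleepable read section */
		if (alive)
			rc = 0;			/* ... handle fault ... */
		srcu_read_unlock(&my_srcu, id);
		return rc;
	}

	static void my_teardown(void)
	{
		alive = false;
		/* wait out every reader that may have seen alive == true */
		synchronize_srcu(&my_srcu);
		/* now safe to unmap and free */
	}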
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 0007b792827b..512bdbc23bbb 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -405,8 +405,8 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
405 || !exp_info->ops->map_dma_buf 405 || !exp_info->ops->map_dma_buf
406 || !exp_info->ops->unmap_dma_buf 406 || !exp_info->ops->unmap_dma_buf
407 || !exp_info->ops->release 407 || !exp_info->ops->release
408 || !exp_info->ops->kmap_atomic 408 || !exp_info->ops->map_atomic
409 || !exp_info->ops->kmap 409 || !exp_info->ops->map
410 || !exp_info->ops->mmap)) { 410 || !exp_info->ops->mmap)) {
411 return ERR_PTR(-EINVAL); 411 return ERR_PTR(-EINVAL);
412 } 412 }
@@ -872,7 +872,7 @@ void *dma_buf_kmap_atomic(struct dma_buf *dmabuf, unsigned long page_num)
872{ 872{
873 WARN_ON(!dmabuf); 873 WARN_ON(!dmabuf);
874 874
875 return dmabuf->ops->kmap_atomic(dmabuf, page_num); 875 return dmabuf->ops->map_atomic(dmabuf, page_num);
876} 876}
877EXPORT_SYMBOL_GPL(dma_buf_kmap_atomic); 877EXPORT_SYMBOL_GPL(dma_buf_kmap_atomic);
878 878
@@ -889,8 +889,8 @@ void dma_buf_kunmap_atomic(struct dma_buf *dmabuf, unsigned long page_num,
889{ 889{
890 WARN_ON(!dmabuf); 890 WARN_ON(!dmabuf);
891 891
892 if (dmabuf->ops->kunmap_atomic) 892 if (dmabuf->ops->unmap_atomic)
893 dmabuf->ops->kunmap_atomic(dmabuf, page_num, vaddr); 893 dmabuf->ops->unmap_atomic(dmabuf, page_num, vaddr);
894} 894}
895EXPORT_SYMBOL_GPL(dma_buf_kunmap_atomic); 895EXPORT_SYMBOL_GPL(dma_buf_kunmap_atomic);
896 896
@@ -907,7 +907,7 @@ void *dma_buf_kmap(struct dma_buf *dmabuf, unsigned long page_num)
907{ 907{
908 WARN_ON(!dmabuf); 908 WARN_ON(!dmabuf);
909 909
910 return dmabuf->ops->kmap(dmabuf, page_num); 910 return dmabuf->ops->map(dmabuf, page_num);
911} 911}
912EXPORT_SYMBOL_GPL(dma_buf_kmap); 912EXPORT_SYMBOL_GPL(dma_buf_kmap);
913 913
@@ -924,8 +924,8 @@ void dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long page_num,
924{ 924{
925 WARN_ON(!dmabuf); 925 WARN_ON(!dmabuf);
926 926
927 if (dmabuf->ops->kunmap) 927 if (dmabuf->ops->unmap)
928 dmabuf->ops->kunmap(dmabuf, page_num, vaddr); 928 dmabuf->ops->unmap(dmabuf, page_num, vaddr);
929} 929}
930EXPORT_SYMBOL_GPL(dma_buf_kunmap); 930EXPORT_SYMBOL_GPL(dma_buf_kunmap);
931 931
@@ -1059,7 +1059,11 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
1059 int ret; 1059 int ret;
1060 struct dma_buf *buf_obj; 1060 struct dma_buf *buf_obj;
1061 struct dma_buf_attachment *attach_obj; 1061 struct dma_buf_attachment *attach_obj;
1062 int count = 0, attach_count; 1062 struct reservation_object *robj;
1063 struct reservation_object_list *fobj;
1064 struct dma_fence *fence;
1065 unsigned seq;
1066 int count = 0, attach_count, shared_count, i;
1063 size_t size = 0; 1067 size_t size = 0;
1064 1068
1065 ret = mutex_lock_interruptible(&db_list.lock); 1069 ret = mutex_lock_interruptible(&db_list.lock);
@@ -1068,7 +1072,8 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
1068 return ret; 1072 return ret;
1069 1073
1070 seq_puts(s, "\nDma-buf Objects:\n"); 1074 seq_puts(s, "\nDma-buf Objects:\n");
1071 seq_puts(s, "size\tflags\tmode\tcount\texp_name\n"); 1075 seq_printf(s, "%-8s\t%-8s\t%-8s\t%-8s\texp_name\n",
1076 "size", "flags", "mode", "count");
1072 1077
1073 list_for_each_entry(buf_obj, &db_list.head, list_node) { 1078 list_for_each_entry(buf_obj, &db_list.head, list_node) {
1074 ret = mutex_lock_interruptible(&buf_obj->lock); 1079 ret = mutex_lock_interruptible(&buf_obj->lock);
@@ -1085,6 +1090,34 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
1085 file_count(buf_obj->file), 1090 file_count(buf_obj->file),
1086 buf_obj->exp_name); 1091 buf_obj->exp_name);
1087 1092
1093 robj = buf_obj->resv;
1094 while (true) {
1095 seq = read_seqcount_begin(&robj->seq);
1096 rcu_read_lock();
1097 fobj = rcu_dereference(robj->fence);
1098 shared_count = fobj ? fobj->shared_count : 0;
1099 fence = rcu_dereference(robj->fence_excl);
1100 if (!read_seqcount_retry(&robj->seq, seq))
1101 break;
1102 rcu_read_unlock();
1103 }
1104
1105 if (fence)
1106 seq_printf(s, "\tExclusive fence: %s %s %ssignalled\n",
1107 fence->ops->get_driver_name(fence),
1108 fence->ops->get_timeline_name(fence),
1109 dma_fence_is_signaled(fence) ? "" : "un");
1110 for (i = 0; i < shared_count; i++) {
1111 fence = rcu_dereference(fobj->shared[i]);
1112 if (!dma_fence_get_rcu(fence))
1113 continue;
1114 seq_printf(s, "\tShared fence: %s %s %ssignalled\n",
1115 fence->ops->get_driver_name(fence),
1116 fence->ops->get_timeline_name(fence),
1117 dma_fence_is_signaled(fence) ? "" : "un");
1118 }
1119 rcu_read_unlock();
1120
1088 seq_puts(s, "\tAttached Devices:\n"); 1121 seq_puts(s, "\tAttached Devices:\n");
1089 attach_count = 0; 1122 attach_count = 0;
1090 1123
diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c
index e18dc596cf24..6204cc32d09c 100644
--- a/drivers/dma/bcm2835-dma.c
+++ b/drivers/dma/bcm2835-dma.c
@@ -251,8 +251,11 @@ static void bcm2835_dma_create_cb_set_length(
251 */ 251 */
252 252
253 /* have we filled in period_length yet? */ 253 /* have we filled in period_length yet? */
254 if (*total_len + control_block->length < period_len) 254 if (*total_len + control_block->length < period_len) {
255 /* update number of bytes in this period so far */
256 *total_len += control_block->length;
255 return; 257 return;
258 }
256 259
257 /* calculate the length that remains to reach period_length */ 260 /* calculate the length that remains to reach period_length */
258 control_block->length = period_len - *total_len; 261 control_block->length = period_len - *total_len;
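The bcm2835 change above fixes an accumulator bug in cyclic transfers: when
a control block fits entirely inside the current period, the running byte
count was never advanced, so period boundaries (and their interrupts) landed
on the wrong block. A tiny runnable model of the corrected accounting (block
and period sizes are made up):

	#include <stdio.h>

	static void account(unsigned *total_len, unsigned *block_len,
			    unsigned period_len)
	{
		if (*total_len + *block_len < period_len) {
			/* block stays inside the period: count its bytes */
			*total_len += *block_len;
			return;
		}
		/* block reaches the boundary: clamp it, start a new period */
		*block_len = period_len - *total_len;
		*total_len = 0;
	}

	int main(void)
	{
		unsigned total = 0, period = 250;
		unsigned blocks[] = { 100, 100, 100 };

		for (int i = 0; i < 3; i++) {
			unsigned len = blocks[i];

			account(&total, &len, period);
			printf("block %d: %u bytes, running total %u\n",
			       i, len, total);
		}
		return 0;
	}

Without the "*total_len += *block_len;" line the first test is always taken
(the total stays 0), and the clamp to the period boundary never happens.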
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 24e0221fd66d..d9118ec23025 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -1108,12 +1108,14 @@ static struct dmaengine_unmap_pool *__get_unmap_pool(int nr)
1108 switch (order) { 1108 switch (order) {
1109 case 0 ... 1: 1109 case 0 ... 1:
1110 return &unmap_pool[0]; 1110 return &unmap_pool[0];
1111#if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
1111 case 2 ... 4: 1112 case 2 ... 4:
1112 return &unmap_pool[1]; 1113 return &unmap_pool[1];
1113 case 5 ... 7: 1114 case 5 ... 7:
1114 return &unmap_pool[2]; 1115 return &unmap_pool[2];
1115 case 8: 1116 case 8:
1116 return &unmap_pool[3]; 1117 return &unmap_pool[3];
1118#endif
1117 default: 1119 default:
1118 BUG(); 1120 BUG();
1119 return NULL; 1121 return NULL;
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index 82d85cce81f8..4773f2867234 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -43,6 +43,7 @@ config EDAC_LEGACY_SYSFS
43 43
44config EDAC_DEBUG 44config EDAC_DEBUG
45 bool "Debugging" 45 bool "Debugging"
46 select DEBUG_FS
46 help 47 help
47 This turns on debugging information for the entire EDAC subsystem. 48 This turns on debugging information for the entire EDAC subsystem.
48 You do so by inserting edac_module with "edac_debug_level=x." Valid 49 You do so by inserting edac_module with "edac_debug_level=x." Valid
@@ -259,6 +260,15 @@ config EDAC_SKX
259 Support for error detection and correction on the Intel 260 Support for error detection and correction on the Intel
260 Skylake server Integrated Memory Controllers. 261 Skylake server Integrated Memory Controllers.
261 262
263config EDAC_PND2
264 tristate "Intel Pondicherry2"
265 depends on EDAC_MM_EDAC && PCI && X86_64 && X86_MCE_INTEL
266 help
267 Support for error detection and correction on the Intel
268 Pondicherry2 Integrated Memory Controller. This SoC IP is
269 first used on the Apollo Lake platform and Denverton
270 micro-server but may appear on others in the future.
271
262config EDAC_MPC85XX 272config EDAC_MPC85XX
263 tristate "Freescale MPC83xx / MPC85xx" 273 tristate "Freescale MPC83xx / MPC85xx"
264 depends on EDAC_MM_EDAC && FSL_SOC 274 depends on EDAC_MM_EDAC && FSL_SOC
diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile
index 88e472e8b9a9..587107e90996 100644
--- a/drivers/edac/Makefile
+++ b/drivers/edac/Makefile
@@ -32,6 +32,7 @@ obj-$(CONFIG_EDAC_I7300) += i7300_edac.o
32obj-$(CONFIG_EDAC_I7CORE) += i7core_edac.o 32obj-$(CONFIG_EDAC_I7CORE) += i7core_edac.o
33obj-$(CONFIG_EDAC_SBRIDGE) += sb_edac.o 33obj-$(CONFIG_EDAC_SBRIDGE) += sb_edac.o
34obj-$(CONFIG_EDAC_SKX) += skx_edac.o 34obj-$(CONFIG_EDAC_SKX) += skx_edac.o
35obj-$(CONFIG_EDAC_PND2) += pnd2_edac.o
35obj-$(CONFIG_EDAC_E7XXX) += e7xxx_edac.o 36obj-$(CONFIG_EDAC_E7XXX) += e7xxx_edac.o
36obj-$(CONFIG_EDAC_E752X) += e752x_edac.o 37obj-$(CONFIG_EDAC_E752X) += e752x_edac.o
37obj-$(CONFIG_EDAC_I82443BXGX) += i82443bxgx_edac.o 38obj-$(CONFIG_EDAC_I82443BXGX) += i82443bxgx_edac.o
diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
index 1670d27bcac8..f683919981b0 100644
--- a/drivers/edac/i5000_edac.c
+++ b/drivers/edac/i5000_edac.c
@@ -1293,7 +1293,7 @@ static int i5000_init_csrows(struct mem_ctl_info *mci)
1293 dimm->mtype = MEM_FB_DDR2; 1293 dimm->mtype = MEM_FB_DDR2;
1294 1294
1295 /* ask what device type on this row */ 1295 /* ask what device type on this row */
1296 if (MTR_DRAM_WIDTH(mtr)) 1296 if (MTR_DRAM_WIDTH(mtr) == 8)
1297 dimm->dtype = DEV_X8; 1297 dimm->dtype = DEV_X8;
1298 else 1298 else
1299 dimm->dtype = DEV_X4; 1299 dimm->dtype = DEV_X4;
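The i5000/i5400 fix above is a truthiness bug: MTR_DRAM_WIDTH() evaluates to
the device width in bits (4 or 8), both non-zero, so
"if (MTR_DRAM_WIDTH(mtr))" always chose the x8 branch. Comparing against 8
restores the x4/x8 split. A stand-alone illustration (the macro here is a
simplified stand-in, not the driver's exact definition):

	#include <stdio.h>

	/* stand-in: width is 8 when bit 6 of the MTR is set, else 4 */
	#define MTR_DRAM_WIDTH(mtr)	((((mtr) >> 6) & 1) ? 8 : 4)

	int main(void)
	{
		unsigned mtr = 0;	/* bit 6 clear: an x4 device */

		if (MTR_DRAM_WIDTH(mtr))	/* buggy: 4 is still true */
			printf("buggy check: x8\n");

		printf("fixed check: %s\n",
		       MTR_DRAM_WIDTH(mtr) == 8 ? "x8" : "x4");
		return 0;
	}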
diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c
index abf6ef22e220..37a9ba71da44 100644
--- a/drivers/edac/i5400_edac.c
+++ b/drivers/edac/i5400_edac.c
@@ -1207,13 +1207,14 @@ static int i5400_init_dimms(struct mem_ctl_info *mci)
1207 1207
1208 dimm->nr_pages = size_mb << 8; 1208 dimm->nr_pages = size_mb << 8;
1209 dimm->grain = 8; 1209 dimm->grain = 8;
1210 dimm->dtype = MTR_DRAM_WIDTH(mtr) ? DEV_X8 : DEV_X4; 1210 dimm->dtype = MTR_DRAM_WIDTH(mtr) == 8 ?
1211 DEV_X8 : DEV_X4;
1211 dimm->mtype = MEM_FB_DDR2; 1212 dimm->mtype = MEM_FB_DDR2;
1212 /* 1213 /*
1213 * The ECC mechanism is SDDC (aka SECC), which 1214 * The ECC mechanism is SDDC (aka SECC), which
1214 * is similar to Chipkill. 1215 * is similar to Chipkill.
1215 */ 1216 */
1216 dimm->edac_mode = MTR_DRAM_WIDTH(mtr) ? 1217 dimm->edac_mode = MTR_DRAM_WIDTH(mtr) == 8 ?
1217 EDAC_S8ECD8ED : EDAC_S4ECD4ED; 1218 EDAC_S8ECD8ED : EDAC_S4ECD4ED;
1218 ndimms++; 1219 ndimms++;
1219 } 1220 }
diff --git a/drivers/edac/pnd2_edac.c b/drivers/edac/pnd2_edac.c
new file mode 100644
index 000000000000..928e0dba41fc
--- /dev/null
+++ b/drivers/edac/pnd2_edac.c
@@ -0,0 +1,1546 @@
1/*
2 * Driver for Pondicherry2 memory controller.
3 *
4 * Copyright (c) 2016, Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * [Derived from sb_edac.c]
16 *
17 * Translation of system physical addresses to DIMM addresses
18 * is a two stage process:
19 *
20 * First the Pondicherry 2 memory controller handles slice and channel interleaving
21 * in "sys2pmi()". This is (almost) completely common between platforms.
22 *
23 * Then a platform specific dunit (DIMM unit) completes the process to provide DIMM,
24 * rank, bank, row and column using the appropriate "dunit_ops" functions/parameters.
25 */
26
27#include <linux/module.h>
28#include <linux/init.h>
29#include <linux/pci.h>
30#include <linux/pci_ids.h>
31#include <linux/slab.h>
32#include <linux/delay.h>
33#include <linux/edac.h>
34#include <linux/mmzone.h>
35#include <linux/smp.h>
36#include <linux/bitmap.h>
37#include <linux/math64.h>
38#include <linux/mod_devicetable.h>
39#include <asm/cpu_device_id.h>
40#include <asm/intel-family.h>
41#include <asm/processor.h>
42#include <asm/mce.h>
43
44#include "edac_mc.h"
45#include "edac_module.h"
46#include "pnd2_edac.h"
47
48#define APL_NUM_CHANNELS 4
49#define DNV_NUM_CHANNELS 2
50#define DNV_MAX_DIMMS 2 /* Max DIMMs per channel */
51
52enum type {
53 APL,
54 DNV, /* All requests go to PMI CH0 on each slice (CH1 disabled) */
55};
56
57struct dram_addr {
58 int chan;
59 int dimm;
60 int rank;
61 int bank;
62 int row;
63 int col;
64};
65
66struct pnd2_pvt {
67 int dimm_geom[APL_NUM_CHANNELS];
68 u64 tolm, tohm;
69};
70
71/*
72 * System address space is divided into multiple regions with
73 * different interleave rules in each. The as0/as1 regions
74 * have no interleaving at all. The as2 region is interleaved
75 * between two channels. The mot region is magic and may overlap
76 * other regions, with its interleave rules taking precedence.
77 * Addresses not in any of these regions are interleaved across
78 * all four channels.
79 */
80static struct region {
81 u64 base;
82 u64 limit;
83 u8 enabled;
84} mot, as0, as1, as2;
85
86static struct dunit_ops {
87 char *name;
88 enum type type;
89 int pmiaddr_shift;
90 int pmiidx_shift;
91 int channels;
92 int dimms_per_channel;
93 int (*rd_reg)(int port, int off, int op, void *data, size_t sz, char *name);
94 int (*get_registers)(void);
95 int (*check_ecc)(void);
96 void (*mk_region)(char *name, struct region *rp, void *asym);
97 void (*get_dimm_config)(struct mem_ctl_info *mci);
98 int (*pmi2mem)(struct mem_ctl_info *mci, u64 pmiaddr, u32 pmiidx,
99 struct dram_addr *daddr, char *msg);
100} *ops;
101
102static struct mem_ctl_info *pnd2_mci;
103
104#define PND2_MSG_SIZE 256
105
106/* Debug macros */
107#define pnd2_printk(level, fmt, arg...) \
108 edac_printk(level, "pnd2", fmt, ##arg)
109
110#define pnd2_mc_printk(mci, level, fmt, arg...) \
111 edac_mc_chipset_printk(mci, level, "pnd2", fmt, ##arg)
112
113#define MOT_CHAN_INTLV_BIT_1SLC_2CH 12
114#define MOT_CHAN_INTLV_BIT_2SLC_2CH 13
115#define SELECTOR_DISABLED (-1)
116#define _4GB (1ul << 32)
117
118#define PMI_ADDRESS_WIDTH 31
119#define PND_MAX_PHYS_BIT 39
120
121#define APL_ASYMSHIFT 28
122#define DNV_ASYMSHIFT 31
123#define CH_HASH_MASK_LSB 6
124#define SLICE_HASH_MASK_LSB 6
125#define MOT_SLC_INTLV_BIT 12
126#define LOG2_PMI_ADDR_GRANULARITY 5
127#define MOT_SHIFT 24
128
129#define GET_BITFIELD(v, lo, hi) (((v) & GENMASK_ULL(hi, lo)) >> (lo))
130#define U64_LSHIFT(val, s) ((u64)(val) << (s))
131
132#ifdef CONFIG_X86_INTEL_SBI_APL
133#include "linux/platform_data/sbi_apl.h"
134int sbi_send(int port, int off, int op, u32 *data)
135{
136 struct sbi_apl_message sbi_arg;
137 int ret, read = 0;
138
139 memset(&sbi_arg, 0, sizeof(sbi_arg));
140
141 if (op == 0 || op == 4 || op == 6)
142 read = 1;
143 else
144 sbi_arg.data = *data;
145
146 sbi_arg.opcode = op;
147 sbi_arg.port_address = port;
148 sbi_arg.register_offset = off;
149 ret = sbi_apl_commit(&sbi_arg);
150 if (ret || sbi_arg.status)
151 edac_dbg(2, "sbi_send status=%d ret=%d data=%x\n",
152 sbi_arg.status, ret, sbi_arg.data);
153
154 if (ret == 0)
155 ret = sbi_arg.status;
156
157 if (ret == 0 && read)
158 *data = sbi_arg.data;
159
160 return ret;
161}
162#else
163int sbi_send(int port, int off, int op, u32 *data)
164{
165 return -EUNATCH;
166}
167#endif
168
169static int apl_rd_reg(int port, int off, int op, void *data, size_t sz, char *name)
170{
171 int ret = 0;
172
173 edac_dbg(2, "Read %s port=%x off=%x op=%x\n", name, port, off, op);
174 switch (sz) {
175 case 8:
176 ret = sbi_send(port, off + 4, op, (u32 *)(data + 4));
177 case 4:
178 ret = sbi_send(port, off, op, (u32 *)data);
179 pnd2_printk(KERN_DEBUG, "%s=%x%08x ret=%d\n", name,
180 sz == 8 ? *((u32 *)(data + 4)) : 0, *((u32 *)data), ret);
181 break;
182 }
183
184 return ret;
185}
186
187static u64 get_mem_ctrl_hub_base_addr(void)
188{
189 struct b_cr_mchbar_lo_pci lo;
190 struct b_cr_mchbar_hi_pci hi;
191 struct pci_dev *pdev;
192
193 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x1980, NULL);
194 if (pdev) {
195 pci_read_config_dword(pdev, 0x48, (u32 *)&lo);
196 pci_read_config_dword(pdev, 0x4c, (u32 *)&hi);
197 pci_dev_put(pdev);
198 } else {
199 return 0;
200 }
201
202 if (!lo.enable) {
203 edac_dbg(2, "MMIO via memory controller hub base address is disabled!\n");
204 return 0;
205 }
206
207 return U64_LSHIFT(hi.base, 32) | U64_LSHIFT(lo.base, 15);
208}
209
210static u64 get_sideband_reg_base_addr(void)
211{
212 struct pci_dev *pdev;
213 u32 hi, lo;
214
215 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x19dd, NULL);
216 if (pdev) {
217 pci_read_config_dword(pdev, 0x10, &lo);
218 pci_read_config_dword(pdev, 0x14, &hi);
219 pci_dev_put(pdev);
220 return (U64_LSHIFT(hi, 32) | U64_LSHIFT(lo, 0));
221 } else {
222 return 0xfd000000;
223 }
224}
225
226static int dnv_rd_reg(int port, int off, int op, void *data, size_t sz, char *name)
227{
228 struct pci_dev *pdev;
229 char *base;
230 u64 addr;
231
232 if (op == 4) {
233 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x1980, NULL);
234 if (!pdev)
235 return -ENODEV;
236
237 pci_read_config_dword(pdev, off, data);
238 pci_dev_put(pdev);
239 } else {
240 /* MMIO via memory controller hub base address */
241 if (op == 0 && port == 0x4c) {
242 addr = get_mem_ctrl_hub_base_addr();
243 if (!addr)
244 return -ENODEV;
245 } else {
246 /* MMIO via sideband register base address */
247 addr = get_sideband_reg_base_addr();
248 if (!addr)
249 return -ENODEV;
250 addr += (port << 16);
251 }
252
253 base = ioremap((resource_size_t)addr, 0x10000);
254 if (!base)
255 return -ENODEV;
256
257 if (sz == 8)
258 *(u32 *)(data + 4) = *(u32 *)(base + off + 4);
259 *(u32 *)data = *(u32 *)(base + off);
260
261 iounmap(base);
262 }
263
264 edac_dbg(2, "Read %s=%.8x_%.8x\n", name,
265 (sz == 8) ? *(u32 *)(data + 4) : 0, *(u32 *)data);
266
267 return 0;
268}
269
270#define RD_REGP(regp, regname, port) \
271 ops->rd_reg(port, \
272 regname##_offset, \
273 regname##_r_opcode, \
274 regp, sizeof(struct regname), \
275 #regname)
276
277#define RD_REG(regp, regname) \
278 ops->rd_reg(regname ## _port, \
279 regname##_offset, \
280 regname##_r_opcode, \
281 regp, sizeof(struct regname), \
282 #regname)
283
284static u64 top_lm, top_hm;
285static bool two_slices;
286static bool two_channels; /* Both PMI channels in one slice enabled */
287
288static u8 sym_chan_mask;
289static u8 asym_chan_mask;
290static u8 chan_mask;
291
292static int slice_selector = -1;
293static int chan_selector = -1;
294static u64 slice_hash_mask;
295static u64 chan_hash_mask;
296
297static void mk_region(char *name, struct region *rp, u64 base, u64 limit)
298{
299 rp->enabled = 1;
300 rp->base = base;
301 rp->limit = limit;
302 edac_dbg(2, "Region:%s [%llx, %llx]\n", name, base, limit);
303}
304
305static void mk_region_mask(char *name, struct region *rp, u64 base, u64 mask)
306{
307 if (mask == 0) {
308 pr_info(FW_BUG "MOT mask cannot be zero\n");
309 return;
310 }
311 if (mask != GENMASK_ULL(PND_MAX_PHYS_BIT, __ffs(mask))) {
312 pr_info(FW_BUG "MOT mask not power of two\n");
313 return;
314 }
315 if (base & ~mask) {
316 pr_info(FW_BUG "MOT region base/mask alignment error\n");
317 return;
318 }
319 rp->base = base;
320 rp->limit = (base | ~mask) & GENMASK_ULL(PND_MAX_PHYS_BIT, 0);
321 rp->enabled = 1;
322 edac_dbg(2, "Region:%s [%llx, %llx]\n", name, base, rp->limit);
323}
324
325static bool in_region(struct region *rp, u64 addr)
326{
327 if (!rp->enabled)
328 return false;
329
330 return rp->base <= addr && addr <= rp->limit;
331}
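mk_region_mask() turns a base/mask pair into an inclusive [base, limit]
range: the limit is the base with every bit below the mask filled in,
clipped at the highest decodable physical address bit; the sanity checks
reject masks that are not a contiguous run of high bits and bases not
aligned to the mask. A small runnable check of the arithmetic (illustrative
values):

	#include <stdio.h>
	#include <stdint.h>

	#define GENMASK_ULL(h, l) \
		((~0ull << (l)) & (~0ull >> (63 - (h))))
	#define MAX_PHYS_BIT 39		/* mirrors PND_MAX_PHYS_BIT */

	int main(void)
	{
		uint64_t base = 0x24000000;	/* mask-aligned */
		uint64_t mask = GENMASK_ULL(MAX_PHYS_BIT, 26);
		uint64_t limit = (base | ~mask) & GENMASK_ULL(MAX_PHYS_BIT, 0);
		uint64_t addr = 0x25ffffff;

		printf("region [%#llx, %#llx]\n",
		       (unsigned long long)base, (unsigned long long)limit);
		printf("%#llx is %sin the region\n",
		       (unsigned long long)addr,
		       (base <= addr && addr <= limit) ? "" : "not ");
		return 0;	/* prints region [0x24000000, 0x27ffffff] */
	}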
332
333static int gen_sym_mask(struct b_cr_slice_channel_hash *p)
334{
335 int mask = 0;
336
337 if (!p->slice_0_mem_disabled)
338 mask |= p->sym_slice0_channel_enabled;
339
340 if (!p->slice_1_disabled)
341 mask |= p->sym_slice1_channel_enabled << 2;
342
343 if (p->ch_1_disabled || p->enable_pmi_dual_data_mode)
344 mask &= 0x5;
345
346 return mask;
347}
348
349static int gen_asym_mask(struct b_cr_slice_channel_hash *p,
350 struct b_cr_asym_mem_region0_mchbar *as0,
351 struct b_cr_asym_mem_region1_mchbar *as1,
352 struct b_cr_asym_2way_mem_region_mchbar *as2way)
353{
354 const int intlv[] = { 0x5, 0xA, 0x3, 0xC };
355 int mask = 0;
356
357 if (as2way->asym_2way_interleave_enable)
358 mask = intlv[as2way->asym_2way_intlv_mode];
359 if (as0->slice0_asym_enable)
360 mask |= (1 << as0->slice0_asym_channel_select);
361 if (as1->slice1_asym_enable)
362 mask |= (4 << as1->slice1_asym_channel_select);
363 if (p->slice_0_mem_disabled)
364 mask &= 0xc;
365 if (p->slice_1_disabled)
366 mask &= 0x3;
367 if (p->ch_1_disabled || p->enable_pmi_dual_data_mode)
368 mask &= 0x5;
369
370 return mask;
371}
372
373static struct b_cr_tolud_pci tolud;
374static struct b_cr_touud_lo_pci touud_lo;
375static struct b_cr_touud_hi_pci touud_hi;
376static struct b_cr_asym_mem_region0_mchbar asym0;
377static struct b_cr_asym_mem_region1_mchbar asym1;
378static struct b_cr_asym_2way_mem_region_mchbar asym_2way;
379static struct b_cr_mot_out_base_mchbar mot_base;
380static struct b_cr_mot_out_mask_mchbar mot_mask;
381static struct b_cr_slice_channel_hash chash;
382
383/* Apollo Lake dunit */
384/*
385 * Validated on board with just two DIMMs in the [0] and [2] positions
386 * in this array. Other port number matches documentation, but caution
387 * advised.
388 */
389static const int apl_dports[APL_NUM_CHANNELS] = { 0x18, 0x10, 0x11, 0x19 };
390static struct d_cr_drp0 drp0[APL_NUM_CHANNELS];
391
392/* Denverton dunit */
393static const int dnv_dports[DNV_NUM_CHANNELS] = { 0x10, 0x12 };
394static struct d_cr_dsch dsch;
395static struct d_cr_ecc_ctrl ecc_ctrl[DNV_NUM_CHANNELS];
396static struct d_cr_drp drp[DNV_NUM_CHANNELS];
397static struct d_cr_dmap dmap[DNV_NUM_CHANNELS];
398static struct d_cr_dmap1 dmap1[DNV_NUM_CHANNELS];
399static struct d_cr_dmap2 dmap2[DNV_NUM_CHANNELS];
400static struct d_cr_dmap3 dmap3[DNV_NUM_CHANNELS];
401static struct d_cr_dmap4 dmap4[DNV_NUM_CHANNELS];
402static struct d_cr_dmap5 dmap5[DNV_NUM_CHANNELS];
403
404static void apl_mk_region(char *name, struct region *rp, void *asym)
405{
406 struct b_cr_asym_mem_region0_mchbar *a = asym;
407
408 mk_region(name, rp,
409 U64_LSHIFT(a->slice0_asym_base, APL_ASYMSHIFT),
410 U64_LSHIFT(a->slice0_asym_limit, APL_ASYMSHIFT) +
411 GENMASK_ULL(APL_ASYMSHIFT - 1, 0));
412}
413
414static void dnv_mk_region(char *name, struct region *rp, void *asym)
415{
416 struct b_cr_asym_mem_region_denverton *a = asym;
417
418 mk_region(name, rp,
419 U64_LSHIFT(a->slice_asym_base, DNV_ASYMSHIFT),
420 U64_LSHIFT(a->slice_asym_limit, DNV_ASYMSHIFT) +
421 GENMASK_ULL(DNV_ASYMSHIFT - 1, 0));
422}
423
424static int apl_get_registers(void)
425{
426 int i;
427
428 if (RD_REG(&asym_2way, b_cr_asym_2way_mem_region_mchbar))
429 return -ENODEV;
430
431 for (i = 0; i < APL_NUM_CHANNELS; i++)
432 if (RD_REGP(&drp0[i], d_cr_drp0, apl_dports[i]))
433 return -ENODEV;
434
435 return 0;
436}
437
438static int dnv_get_registers(void)
439{
440 int i;
441
442 if (RD_REG(&dsch, d_cr_dsch))
443 return -ENODEV;
444
445 for (i = 0; i < DNV_NUM_CHANNELS; i++)
446 if (RD_REGP(&ecc_ctrl[i], d_cr_ecc_ctrl, dnv_dports[i]) ||
447 RD_REGP(&drp[i], d_cr_drp, dnv_dports[i]) ||
448 RD_REGP(&dmap[i], d_cr_dmap, dnv_dports[i]) ||
449 RD_REGP(&dmap1[i], d_cr_dmap1, dnv_dports[i]) ||
450 RD_REGP(&dmap2[i], d_cr_dmap2, dnv_dports[i]) ||
451 RD_REGP(&dmap3[i], d_cr_dmap3, dnv_dports[i]) ||
452 RD_REGP(&dmap4[i], d_cr_dmap4, dnv_dports[i]) ||
453 RD_REGP(&dmap5[i], d_cr_dmap5, dnv_dports[i]))
454 return -ENODEV;
455
456 return 0;
457}
458
459/*
460 * Read all the h/w config registers once here (they don't
461 * change at run time. Figure out which address ranges have
462 * which interleave characteristics.
463 */
464static int get_registers(void)
465{
466 const int intlv[] = { 10, 11, 12, 12 };
467
468 if (RD_REG(&tolud, b_cr_tolud_pci) ||
469 RD_REG(&touud_lo, b_cr_touud_lo_pci) ||
470 RD_REG(&touud_hi, b_cr_touud_hi_pci) ||
471 RD_REG(&asym0, b_cr_asym_mem_region0_mchbar) ||
472 RD_REG(&asym1, b_cr_asym_mem_region1_mchbar) ||
473 RD_REG(&mot_base, b_cr_mot_out_base_mchbar) ||
474 RD_REG(&mot_mask, b_cr_mot_out_mask_mchbar) ||
475 RD_REG(&chash, b_cr_slice_channel_hash))
476 return -ENODEV;
477
478 if (ops->get_registers())
479 return -ENODEV;
480
481 if (ops->type == DNV) {
482 /* PMI channel idx (always 0) for asymmetric region */
483 asym0.slice0_asym_channel_select = 0;
484 asym1.slice1_asym_channel_select = 0;
485 /* PMI channel bitmap (always 1) for symmetric region */
486 chash.sym_slice0_channel_enabled = 0x1;
487 chash.sym_slice1_channel_enabled = 0x1;
488 }
489
490 if (asym0.slice0_asym_enable)
491 ops->mk_region("as0", &as0, &asym0);
492
493 if (asym1.slice1_asym_enable)
494 ops->mk_region("as1", &as1, &asym1);
495
496 if (asym_2way.asym_2way_interleave_enable) {
497 mk_region("as2way", &as2,
498 U64_LSHIFT(asym_2way.asym_2way_base, APL_ASYMSHIFT),
499 U64_LSHIFT(asym_2way.asym_2way_limit, APL_ASYMSHIFT) +
500 GENMASK_ULL(APL_ASYMSHIFT - 1, 0));
501 }
502
503 if (mot_base.imr_en) {
504 mk_region_mask("mot", &mot,
505 U64_LSHIFT(mot_base.mot_out_base, MOT_SHIFT),
506 U64_LSHIFT(mot_mask.mot_out_mask, MOT_SHIFT));
507 }
508
509 top_lm = U64_LSHIFT(tolud.tolud, 20);
510 top_hm = U64_LSHIFT(touud_hi.touud, 32) | U64_LSHIFT(touud_lo.touud, 20);
511
512 two_slices = !chash.slice_1_disabled &&
513 !chash.slice_0_mem_disabled &&
514 (chash.sym_slice0_channel_enabled != 0) &&
515 (chash.sym_slice1_channel_enabled != 0);
516 two_channels = !chash.ch_1_disabled &&
517 !chash.enable_pmi_dual_data_mode &&
518 ((chash.sym_slice0_channel_enabled == 3) ||
519 (chash.sym_slice1_channel_enabled == 3));
520
521 sym_chan_mask = gen_sym_mask(&chash);
522 asym_chan_mask = gen_asym_mask(&chash, &asym0, &asym1, &asym_2way);
523 chan_mask = sym_chan_mask | asym_chan_mask;
524
525 if (two_slices && !two_channels) {
526 if (chash.hvm_mode)
527 slice_selector = 29;
528 else
529 slice_selector = intlv[chash.interleave_mode];
530 } else if (!two_slices && two_channels) {
531 if (chash.hvm_mode)
532 chan_selector = 29;
533 else
534 chan_selector = intlv[chash.interleave_mode];
535 } else if (two_slices && two_channels) {
536 if (chash.hvm_mode) {
537 slice_selector = 29;
538 chan_selector = 30;
539 } else {
540 slice_selector = intlv[chash.interleave_mode];
541 chan_selector = intlv[chash.interleave_mode] + 1;
542 }
543 }
544
545 if (two_slices) {
546 if (!chash.hvm_mode)
547 slice_hash_mask = chash.slice_hash_mask << SLICE_HASH_MASK_LSB;
548 if (!two_channels)
549 slice_hash_mask |= BIT_ULL(slice_selector);
550 }
551
552 if (two_channels) {
553 if (!chash.hvm_mode)
554 chan_hash_mask = chash.ch_hash_mask << CH_HASH_MASK_LSB;
555 if (!two_slices)
556 chan_hash_mask |= BIT_ULL(chan_selector);
557 }
558
559 return 0;
560}
561
562/* Get a contiguous memory address (remove the MMIO gap) */
563static u64 remove_mmio_gap(u64 sys)
564{
565 return (sys < _4GB) ? sys : sys - (_4GB - top_lm);
566}
567
568/* Squeeze out one address bit, shift upper part down to fill gap */
569static void remove_addr_bit(u64 *addr, int bitidx)
570{
571 u64 mask;
572
573 if (bitidx == -1)
574 return;
575
576 mask = (1ull << bitidx) - 1;
577 *addr = ((*addr >> 1) & ~mask) | (*addr & mask);
578}
579
580/* XOR all the bits from addr specified in mask */
581static int hash_by_mask(u64 addr, u64 mask)
582{
583 u64 result = addr & mask;
584
585 result = (result >> 32) ^ result;
586 result = (result >> 16) ^ result;
587 result = (result >> 8) ^ result;
588 result = (result >> 4) ^ result;
589 result = (result >> 2) ^ result;
590 result = (result >> 1) ^ result;
591
592 return (int)result & 1;
593}
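remove_addr_bit() and hash_by_mask() are the two primitives of the
interleave decode: the first deletes one address bit and shifts the upper
part down (turning an interleaved system address into a contiguous
per-channel offset), the second XOR-folds the address bits selected by a
mask into a single parity bit, which is how the hardware picks a slice or
channel. Both are easy to check in isolation:

	#include <stdio.h>
	#include <stdint.h>

	static void remove_addr_bit(uint64_t *addr, int bitidx)
	{
		uint64_t mask = (1ull << bitidx) - 1;

		/* keep bits below bitidx, shift everything above down */
		*addr = ((*addr >> 1) & ~mask) | (*addr & mask);
	}

	static int hash_by_mask(uint64_t addr, uint64_t mask)
	{
		uint64_t r = addr & mask;

		r ^= r >> 32; r ^= r >> 16; r ^= r >> 8;
		r ^= r >> 4;  r ^= r >> 2;  r ^= r >> 1;
		return (int)(r & 1);	/* parity of the selected bits */
	}

	int main(void)
	{
		uint64_t addr = 0x1234;	/* bits 2, 4, 5, 9, 12 set */

		printf("parity over bits 12 and 6: %d\n",	/* 1 ^ 0 = 1 */
		       hash_by_mask(addr, (1ull << 12) | (1ull << 6)));
		remove_addr_bit(&addr, 6);
		printf("after squeezing out bit 6: %#llx\n",	/* 0x934 */
		       (unsigned long long)addr);
		return 0;
	}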
594
595/*
596 * First stage decode. Take the system address and figure out which
597 * second stage will deal with it based on interleave modes.
598 */
599static int sys2pmi(const u64 addr, u32 *pmiidx, u64 *pmiaddr, char *msg)
600{
601 u64 contig_addr, contig_base, contig_offset, contig_base_adj;
602 int mot_intlv_bit = two_slices ? MOT_CHAN_INTLV_BIT_2SLC_2CH :
603 MOT_CHAN_INTLV_BIT_1SLC_2CH;
604 int slice_intlv_bit_rm = SELECTOR_DISABLED;
605 int chan_intlv_bit_rm = SELECTOR_DISABLED;
606 /* Determine if address is in the MOT region. */
607 bool mot_hit = in_region(&mot, addr);
608 /* Calculate the number of symmetric regions enabled. */
609 int sym_channels = hweight8(sym_chan_mask);
610
611 /*
612 * The amount we need to shift the asym base can be determined by the
613 * number of enabled symmetric channels.
614 * NOTE: This can only work because symmetric memory is not supposed
615 * to do a 3-way interleave.
616 */
617 int sym_chan_shift = sym_channels >> 1;
618
619 /* Give up if address is out of range, or in MMIO gap */
620 if (addr >= (1ul << PND_MAX_PHYS_BIT) ||
621 (addr >= top_lm && addr < _4GB) || addr >= top_hm) {
622 snprintf(msg, PND2_MSG_SIZE, "Error address 0x%llx is not DRAM", addr);
623 return -EINVAL;
624 }
625
626 /* Get a contiguous memory address (remove the MMIO gap) */
627 contig_addr = remove_mmio_gap(addr);
628
629 if (in_region(&as0, addr)) {
630 *pmiidx = asym0.slice0_asym_channel_select;
631
632 contig_base = remove_mmio_gap(as0.base);
633 contig_offset = contig_addr - contig_base;
634 contig_base_adj = (contig_base >> sym_chan_shift) *
635 ((chash.sym_slice0_channel_enabled >> (*pmiidx & 1)) & 1);
636 contig_addr = contig_offset + ((sym_channels > 0) ? contig_base_adj : 0ull);
637 } else if (in_region(&as1, addr)) {
638 *pmiidx = 2u + asym1.slice1_asym_channel_select;
639
640 contig_base = remove_mmio_gap(as1.base);
641 contig_offset = contig_addr - contig_base;
642 contig_base_adj = (contig_base >> sym_chan_shift) *
643 ((chash.sym_slice1_channel_enabled >> (*pmiidx & 1)) & 1);
644 contig_addr = contig_offset + ((sym_channels > 0) ? contig_base_adj : 0ull);
645 } else if (in_region(&as2, addr) && (asym_2way.asym_2way_intlv_mode == 0x3ul)) {
646 bool channel1;
647
648 mot_intlv_bit = MOT_CHAN_INTLV_BIT_1SLC_2CH;
649 *pmiidx = (asym_2way.asym_2way_intlv_mode & 1) << 1;
650 channel1 = mot_hit ? ((bool)((addr >> mot_intlv_bit) & 1)) :
651 hash_by_mask(contig_addr, chan_hash_mask);
652 *pmiidx |= (u32)channel1;
653
654 contig_base = remove_mmio_gap(as2.base);
655 chan_intlv_bit_rm = mot_hit ? mot_intlv_bit : chan_selector;
656 contig_offset = contig_addr - contig_base;
657 remove_addr_bit(&contig_offset, chan_intlv_bit_rm);
658 contig_addr = (contig_base >> sym_chan_shift) + contig_offset;
659 } else {
660 /* Otherwise we're in normal, boring symmetric mode. */
661 *pmiidx = 0u;
662
663 if (two_slices) {
664 bool slice1;
665
666 if (mot_hit) {
667 slice_intlv_bit_rm = MOT_SLC_INTLV_BIT;
668 slice1 = (addr >> MOT_SLC_INTLV_BIT) & 1;
669 } else {
670 slice_intlv_bit_rm = slice_selector;
671 slice1 = hash_by_mask(addr, slice_hash_mask);
672 }
673
674 *pmiidx = (u32)slice1 << 1;
675 }
676
677 if (two_channels) {
678 bool channel1;
679
680 mot_intlv_bit = two_slices ? MOT_CHAN_INTLV_BIT_2SLC_2CH :
681 MOT_CHAN_INTLV_BIT_1SLC_2CH;
682
683 if (mot_hit) {
684 chan_intlv_bit_rm = mot_intlv_bit;
685 channel1 = (addr >> mot_intlv_bit) & 1;
686 } else {
687 chan_intlv_bit_rm = chan_selector;
688 channel1 = hash_by_mask(contig_addr, chan_hash_mask);
689 }
690
691 *pmiidx |= (u32)channel1;
692 }
693 }
694
695 /* Remove the chan_selector bit first */
696 remove_addr_bit(&contig_addr, chan_intlv_bit_rm);
697 /* Remove the slice bit (we remove it second because it must be lower) */
698 remove_addr_bit(&contig_addr, slice_intlv_bit_rm);
699 *pmiaddr = contig_addr;
700
701 return 0;
702}
703
704/* Translate PMI address to memory (rank, row, bank, column) */
705#define C(n) (0x10 | (n)) /* column */
706#define B(n) (0x20 | (n)) /* bank */
707#define R(n) (0x40 | (n)) /* row */
708#define RS (0x80) /* rank */
709
710/* addrdec values */
711#define AMAP_1KB 0
712#define AMAP_2KB 1
713#define AMAP_4KB 2
714#define AMAP_RSVD 3
715
716/* dden values */
717#define DEN_4Gb 0
718#define DEN_8Gb 2
719
720/* dwid values */
721#define X8 0
722#define X16 1
723
724static struct dimm_geometry {
725 u8 addrdec;
726 u8 dden;
727 u8 dwid;
728 u8 rowbits, colbits;
729 u16 bits[PMI_ADDRESS_WIDTH];
730} dimms[] = {
731 {
732 .addrdec = AMAP_1KB, .dden = DEN_4Gb, .dwid = X16,
733 .rowbits = 15, .colbits = 10,
734 .bits = {
735 C(2), C(3), C(4), C(5), C(6), B(0), B(1), B(2), R(0),
736 R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8), R(9),
737 R(10), C(7), C(8), C(9), R(11), RS, R(12), R(13), R(14),
738 0, 0, 0, 0
739 }
740 },
741 {
742 .addrdec = AMAP_1KB, .dden = DEN_4Gb, .dwid = X8,
743 .rowbits = 16, .colbits = 10,
744 .bits = {
745 C(2), C(3), C(4), C(5), C(6), B(0), B(1), B(2), R(0),
746 R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8), R(9),
747 R(10), C(7), C(8), C(9), R(11), RS, R(12), R(13), R(14),
748 R(15), 0, 0, 0
749 }
750 },
751 {
752 .addrdec = AMAP_1KB, .dden = DEN_8Gb, .dwid = X16,
753 .rowbits = 16, .colbits = 10,
754 .bits = {
755 C(2), C(3), C(4), C(5), C(6), B(0), B(1), B(2), R(0),
756 R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8), R(9),
757 R(10), C(7), C(8), C(9), R(11), RS, R(12), R(13), R(14),
758 R(15), 0, 0, 0
759 }
760 },
761 {
762 .addrdec = AMAP_1KB, .dden = DEN_8Gb, .dwid = X8,
763 .rowbits = 16, .colbits = 11,
764 .bits = {
765 C(2), C(3), C(4), C(5), C(6), B(0), B(1), B(2), R(0),
766 R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8), R(9),
767 R(10), C(7), C(8), C(9), R(11), RS, C(11), R(12), R(13),
768 R(14), R(15), 0, 0
769 }
770 },
771 {
772 .addrdec = AMAP_2KB, .dden = DEN_4Gb, .dwid = X16,
773 .rowbits = 15, .colbits = 10,
774 .bits = {
775 C(2), C(3), C(4), C(5), C(6), C(7), B(0), B(1), B(2),
776 R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8),
777 R(9), R(10), C(8), C(9), R(11), RS, R(12), R(13), R(14),
778 0, 0, 0, 0
779 }
780 },
781 {
782 .addrdec = AMAP_2KB, .dden = DEN_4Gb, .dwid = X8,
783 .rowbits = 16, .colbits = 10,
784 .bits = {
785 C(2), C(3), C(4), C(5), C(6), C(7), B(0), B(1), B(2),
786 R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8),
787 R(9), R(10), C(8), C(9), R(11), RS, R(12), R(13), R(14),
788 R(15), 0, 0, 0
789 }
790 },
791 {
792 .addrdec = AMAP_2KB, .dden = DEN_8Gb, .dwid = X16,
793 .rowbits = 16, .colbits = 10,
794 .bits = {
795 C(2), C(3), C(4), C(5), C(6), C(7), B(0), B(1), B(2),
796 R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8),
797 R(9), R(10), C(8), C(9), R(11), RS, R(12), R(13), R(14),
798 R(15), 0, 0, 0
799 }
800 },
801 {
802 .addrdec = AMAP_2KB, .dden = DEN_8Gb, .dwid = X8,
803 .rowbits = 16, .colbits = 11,
804 .bits = {
805 C(2), C(3), C(4), C(5), C(6), C(7), B(0), B(1), B(2),
806 R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8),
807 R(9), R(10), C(8), C(9), R(11), RS, C(11), R(12), R(13),
808 R(14), R(15), 0, 0
809 }
810 },
811 {
812 .addrdec = AMAP_4KB, .dden = DEN_4Gb, .dwid = X16,
813 .rowbits = 15, .colbits = 10,
814 .bits = {
815 C(2), C(3), C(4), C(5), C(6), C(7), C(8), B(0), B(1),
816 B(2), R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7),
817 R(8), R(9), R(10), C(9), R(11), RS, R(12), R(13), R(14),
818 0, 0, 0, 0
819 }
820 },
821 {
822 .addrdec = AMAP_4KB, .dden = DEN_4Gb, .dwid = X8,
823 .rowbits = 16, .colbits = 10,
824 .bits = {
825 C(2), C(3), C(4), C(5), C(6), C(7), C(8), B(0), B(1),
826 B(2), R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7),
827 R(8), R(9), R(10), C(9), R(11), RS, R(12), R(13), R(14),
828 R(15), 0, 0, 0
829 }
830 },
831 {
832 .addrdec = AMAP_4KB, .dden = DEN_8Gb, .dwid = X16,
833 .rowbits = 16, .colbits = 10,
834 .bits = {
835 C(2), C(3), C(4), C(5), C(6), C(7), C(8), B(0), B(1),
836 B(2), R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7),
837 R(8), R(9), R(10), C(9), R(11), RS, R(12), R(13), R(14),
838 R(15), 0, 0, 0
839 }
840 },
841 {
842 .addrdec = AMAP_4KB, .dden = DEN_8Gb, .dwid = X8,
843 .rowbits = 16, .colbits = 11,
844 .bits = {
845 C(2), C(3), C(4), C(5), C(6), C(7), C(8), B(0), B(1),
846 B(2), R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7),
847 R(8), R(9), R(10), C(9), R(11), RS, C(11), R(12), R(13),
848 R(14), R(15), 0, 0
849 }
850 }
851};
852
853static int bank_hash(u64 pmiaddr, int idx, int shft)
854{
855 int bhash = 0;
856
857 switch (idx) {
858 case 0:
859 bhash ^= ((pmiaddr >> (12 + shft)) ^ (pmiaddr >> (9 + shft))) & 1;
860 break;
861 case 1:
862 bhash ^= (((pmiaddr >> (10 + shft)) ^ (pmiaddr >> (8 + shft))) & 1) << 1;
863 bhash ^= ((pmiaddr >> 22) & 1) << 1;
864 break;
865 case 2:
866 bhash ^= (((pmiaddr >> (13 + shft)) ^ (pmiaddr >> (11 + shft))) & 1) << 2;
867 break;
868 }
869
870 return bhash;
871}
872
873static int rank_hash(u64 pmiaddr)
874{
875 return ((pmiaddr >> 16) ^ (pmiaddr >> 10)) & 1;
876}
877
878/* Second stage decode. Compute rank, bank, row & column. */
879static int apl_pmi2mem(struct mem_ctl_info *mci, u64 pmiaddr, u32 pmiidx,
880 struct dram_addr *daddr, char *msg)
881{
882 struct d_cr_drp0 *cr_drp0 = &drp0[pmiidx];
883 struct pnd2_pvt *pvt = mci->pvt_info;
884 int g = pvt->dimm_geom[pmiidx];
885 struct dimm_geometry *d = &dimms[g];
886 int column = 0, bank = 0, row = 0, rank = 0;
887 int i, idx, type, skiprs = 0;
888
889 for (i = 0; i < PMI_ADDRESS_WIDTH; i++) {
890 int bit = (pmiaddr >> i) & 1;
891
892 if (i + skiprs >= PMI_ADDRESS_WIDTH) {
893 snprintf(msg, PND2_MSG_SIZE, "Bad dimm_geometry[] table\n");
894 return -EINVAL;
895 }
896
897 type = d->bits[i + skiprs] & ~0xf;
898 idx = d->bits[i + skiprs] & 0xf;
899
900 /*
901 * On single rank DIMMs ignore the rank select bit
902 * and shift remainder of "bits[]" down one place.
903 */
904 if (type == RS && (cr_drp0->rken0 + cr_drp0->rken1) == 1) {
905 skiprs = 1;
906 type = d->bits[i + skiprs] & ~0xf;
907 idx = d->bits[i + skiprs] & 0xf;
908 }
909
910 switch (type) {
911 case C(0):
912 column |= (bit << idx);
913 break;
914 case B(0):
915 bank |= (bit << idx);
916 if (cr_drp0->bahen)
917 bank ^= bank_hash(pmiaddr, idx, d->addrdec);
918 break;
919 case R(0):
920 row |= (bit << idx);
921 break;
922 case RS:
923 rank = bit;
924 if (cr_drp0->rsien)
925 rank ^= rank_hash(pmiaddr);
926 break;
927 default:
928 if (bit) {
929 snprintf(msg, PND2_MSG_SIZE, "Bad translation\n");
930 return -EINVAL;
931 }
932 goto done;
933 }
934 }
935
936done:
937 daddr->col = column;
938 daddr->bank = bank;
939 daddr->row = row;
940 daddr->rank = rank;
941 daddr->dimm = 0;
942
943 return 0;
944}
945
946/* Pluck bit "in" from pmiaddr and return value shifted to bit "out" */
947#define dnv_get_bit(pmi, in, out) ((int)(((pmi) >> (in)) & 1u) << (out))
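/* Worked example: dnv_get_bit(0x123, 5, 2) - bit 5 of 0x123 is 1, so the result is 1 << 2 = 0x4 */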
948
949static int dnv_pmi2mem(struct mem_ctl_info *mci, u64 pmiaddr, u32 pmiidx,
950 struct dram_addr *daddr, char *msg)
951{
952 /* Rank 0 or 1 */
953 daddr->rank = dnv_get_bit(pmiaddr, dmap[pmiidx].rs0 + 13, 0);
954 /* Rank 2 or 3 */
955 daddr->rank |= dnv_get_bit(pmiaddr, dmap[pmiidx].rs1 + 13, 1);
956
957 /*
958 * Normally ranks 0,1 are DIMM0, and 2,3 are DIMM1, but we
959 * flip them if DIMM1 is larger than DIMM0.
960 */
961 daddr->dimm = (daddr->rank >= 2) ^ drp[pmiidx].dimmflip;
962
963 daddr->bank = dnv_get_bit(pmiaddr, dmap[pmiidx].ba0 + 6, 0);
964 daddr->bank |= dnv_get_bit(pmiaddr, dmap[pmiidx].ba1 + 6, 1);
965 daddr->bank |= dnv_get_bit(pmiaddr, dmap[pmiidx].bg0 + 6, 2);
966 if (dsch.ddr4en)
967 daddr->bank |= dnv_get_bit(pmiaddr, dmap[pmiidx].bg1 + 6, 3);
968 if (dmap1[pmiidx].bxor) {
969 if (dsch.ddr4en) {
970 daddr->bank ^= dnv_get_bit(pmiaddr, dmap3[pmiidx].row6 + 6, 0);
971 daddr->bank ^= dnv_get_bit(pmiaddr, dmap3[pmiidx].row7 + 6, 1);
972 if (dsch.chan_width == 0)
973 /* 64/72 bit dram channel width */
974 daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca3 + 6, 2);
975 else
976 /* 32/40 bit dram channel width */
977 daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca4 + 6, 2);
978 daddr->bank ^= dnv_get_bit(pmiaddr, dmap2[pmiidx].row2 + 6, 3);
979 } else {
980 daddr->bank ^= dnv_get_bit(pmiaddr, dmap2[pmiidx].row2 + 6, 0);
981 daddr->bank ^= dnv_get_bit(pmiaddr, dmap3[pmiidx].row6 + 6, 1);
982 if (dsch.chan_width == 0)
983 daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca3 + 6, 2);
984 else
985 daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca4 + 6, 2);
986 }
987 }
988
989 daddr->row = dnv_get_bit(pmiaddr, dmap2[pmiidx].row0 + 6, 0);
990 daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row1 + 6, 1);
991 daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row2 + 6, 2);
992 daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row3 + 6, 3);
993 daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row4 + 6, 4);
994 daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row5 + 6, 5);
995 daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row6 + 6, 6);
996 daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row7 + 6, 7);
997 daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row8 + 6, 8);
998 daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row9 + 6, 9);
999 daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row10 + 6, 10);
1000 daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row11 + 6, 11);
1001 daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row12 + 6, 12);
1002 daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row13 + 6, 13);
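	/* A 5-bit dmap4 row field of all-ones (31) marks a row bit that is not mapped */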
1003 if (dmap4[pmiidx].row14 != 31)
1004 daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row14 + 6, 14);
1005 if (dmap4[pmiidx].row15 != 31)
1006 daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row15 + 6, 15);
1007 if (dmap4[pmiidx].row16 != 31)
1008 daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row16 + 6, 16);
1009 if (dmap4[pmiidx].row17 != 31)
1010 daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row17 + 6, 17);
1011
1012 daddr->col = dnv_get_bit(pmiaddr, dmap5[pmiidx].ca3 + 6, 3);
1013 daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca4 + 6, 4);
1014 daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca5 + 6, 5);
1015 daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca6 + 6, 6);
1016 daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca7 + 6, 7);
1017 daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca8 + 6, 8);
1018 daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca9 + 6, 9);
1019 if (!dsch.ddr4en && dmap1[pmiidx].ca11 != 0x3f)
1020 daddr->col |= dnv_get_bit(pmiaddr, dmap1[pmiidx].ca11 + 13, 11);
1021
1022 return 0;
1023}
1024
1025static int check_channel(int ch)
1026{
1027 if (drp0[ch].dramtype != 0) {
1028 pnd2_printk(KERN_INFO, "Unsupported DIMM in channel %d\n", ch);
1029 return 1;
1030 } else if (drp0[ch].eccen == 0) {
1031 pnd2_printk(KERN_INFO, "ECC disabled on channel %d\n", ch);
1032 return 1;
1033 }
1034 return 0;
1035}
1036
1037static int apl_check_ecc_active(void)
1038{
1039 int i, ret = 0;
1040
1041 /* Check dramtype and ECC mode for each present DIMM */
1042 for (i = 0; i < APL_NUM_CHANNELS; i++)
1043 if (chan_mask & BIT(i))
1044 ret += check_channel(i);
1045 return ret ? -EINVAL : 0;
1046}
1047
1048#define DIMMS_PRESENT(d) ((d)->rken0 + (d)->rken1 + (d)->rken2 + (d)->rken3)
1049
1050static int check_unit(int ch)
1051{
1052 struct d_cr_drp *d = &drp[ch];
1053
1054 if (DIMMS_PRESENT(d) && !ecc_ctrl[ch].eccen) {
1055 pnd2_printk(KERN_INFO, "ECC disabled on channel %d\n", ch);
1056 return 1;
1057 }
1058 return 0;
1059}
1060
1061static int dnv_check_ecc_active(void)
1062{
1063 int i, ret = 0;
1064
1065 for (i = 0; i < DNV_NUM_CHANNELS; i++)
1066 ret += check_unit(i);
1067 return ret ? -EINVAL : 0;
1068}
1069
1070static int get_memory_error_data(struct mem_ctl_info *mci, u64 addr,
1071 struct dram_addr *daddr, char *msg)
1072{
1073 u64 pmiaddr;
1074 u32 pmiidx;
1075 int ret;
1076
1077 ret = sys2pmi(addr, &pmiidx, &pmiaddr, msg);
1078 if (ret)
1079 return ret;
1080
1081 pmiaddr >>= ops->pmiaddr_shift;
1082 /* pmi channel idx to dimm channel idx */
1083 pmiidx >>= ops->pmiidx_shift;
1084 daddr->chan = pmiidx;
1085
1086 ret = ops->pmi2mem(mci, pmiaddr, pmiidx, daddr, msg);
1087 if (ret)
1088 return ret;
1089
1090 edac_dbg(0, "SysAddr=%llx PmiAddr=%llx Channel=%d DIMM=%d Rank=%d Bank=%d Row=%d Column=%d\n",
1091 addr, pmiaddr, daddr->chan, daddr->dimm, daddr->rank, daddr->bank, daddr->row, daddr->col);
1092
1093 return 0;
1094}
1095
1096static void pnd2_mce_output_error(struct mem_ctl_info *mci, const struct mce *m,
1097 struct dram_addr *daddr)
1098{
1099 enum hw_event_mc_err_type tp_event;
1100 char *optype, msg[PND2_MSG_SIZE];
1101 bool ripv = m->mcgstatus & MCG_STATUS_RIPV;
1102 bool overflow = m->status & MCI_STATUS_OVER;
1103 bool uc_err = m->status & MCI_STATUS_UC;
1104 bool recov = m->status & MCI_STATUS_S;
1105 u32 core_err_cnt = GET_BITFIELD(m->status, 38, 52);
1106 u32 mscod = GET_BITFIELD(m->status, 16, 31);
1107 u32 errcode = GET_BITFIELD(m->status, 0, 15);
1108 u32 optypenum = GET_BITFIELD(m->status, 4, 6);
1109 int rc;
1110
1111 tp_event = uc_err ? (ripv ? HW_EVENT_ERR_FATAL : HW_EVENT_ERR_UNCORRECTED) :
1112 HW_EVENT_ERR_CORRECTED;
1113
1114 /*
1115	 * According to Table 15-9 of the Intel Architecture spec vol 3A,
1116 * memory errors should fit in this mask:
1117 * 000f 0000 1mmm cccc (binary)
1118 * where:
1119 * f = Correction Report Filtering Bit. If 1, subsequent errors
1120 * won't be shown
1121 * mmm = error type
1122 * cccc = channel
1123 * If the mask doesn't match, report an error to the parsing logic
1124 */
1125 if (!((errcode & 0xef80) == 0x80)) {
1126 optype = "Can't parse: it is not a mem";
1127 } else {
1128 switch (optypenum) {
1129 case 0:
1130 optype = "generic undef request error";
1131 break;
1132 case 1:
1133 optype = "memory read error";
1134 break;
1135 case 2:
1136 optype = "memory write error";
1137 break;
1138 case 3:
1139 optype = "addr/cmd error";
1140 break;
1141 case 4:
1142 optype = "memory scrubbing error";
1143 break;
1144 default:
1145 optype = "reserved";
1146 break;
1147 }
1148 }
1149
1150	/* Only decode errors with a valid address (ADDRV) */
1151 if (!(m->status & MCI_STATUS_ADDRV))
1152 return;
1153
1154 rc = get_memory_error_data(mci, m->addr, daddr, msg);
1155 if (rc)
1156 goto address_error;
1157
1158 snprintf(msg, sizeof(msg),
1159 "%s%s err_code:%04x:%04x channel:%d DIMM:%d rank:%d row:%d bank:%d col:%d",
1160 overflow ? " OVERFLOW" : "", (uc_err && recov) ? " recoverable" : "", mscod,
1161 errcode, daddr->chan, daddr->dimm, daddr->rank, daddr->row, daddr->bank, daddr->col);
1162
1163 edac_dbg(0, "%s\n", msg);
1164
1165 /* Call the helper to output message */
1166 edac_mc_handle_error(tp_event, mci, core_err_cnt, m->addr >> PAGE_SHIFT,
1167 m->addr & ~PAGE_MASK, 0, daddr->chan, daddr->dimm, -1, optype, msg);
1168
1169 return;
1170
1171address_error:
1172 edac_mc_handle_error(tp_event, mci, core_err_cnt, 0, 0, 0, -1, -1, -1, msg, "");
1173}
1174
1175static void apl_get_dimm_config(struct mem_ctl_info *mci)
1176{
1177 struct pnd2_pvt *pvt = mci->pvt_info;
1178 struct dimm_info *dimm;
1179 struct d_cr_drp0 *d;
1180 u64 capacity;
1181 int i, g;
1182
1183 for (i = 0; i < APL_NUM_CHANNELS; i++) {
1184 if (!(chan_mask & BIT(i)))
1185 continue;
1186
1187 dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers, i, 0, 0);
1188 if (!dimm) {
1189 edac_dbg(0, "No allocated DIMM for channel %d\n", i);
1190 continue;
1191 }
1192
1193 d = &drp0[i];
1194 for (g = 0; g < ARRAY_SIZE(dimms); g++)
1195 if (dimms[g].addrdec == d->addrdec &&
1196 dimms[g].dden == d->dden &&
1197 dimms[g].dwid == d->dwid)
1198 break;
1199
1200 if (g == ARRAY_SIZE(dimms)) {
1201 edac_dbg(0, "Channel %d: unrecognized DIMM\n", i);
1202 continue;
1203 }
1204
1205 pvt->dimm_geom[i] = g;
1206 capacity = (d->rken0 + d->rken1) * 8 * (1ul << dimms[g].rowbits) *
1207 (1ul << dimms[g].colbits);
1208 edac_dbg(0, "Channel %d: %lld MByte DIMM\n", i, capacity >> (20 - 3));
1209 dimm->nr_pages = MiB_TO_PAGES(capacity >> (20 - 3));
1210 dimm->grain = 32;
1211 dimm->dtype = (d->dwid == 0) ? DEV_X8 : DEV_X16;
1212 dimm->mtype = MEM_DDR3;
1213 dimm->edac_mode = EDAC_SECDED;
1214 snprintf(dimm->label, sizeof(dimm->label), "Slice#%d_Chan#%d", i / 2, i % 2);
1215 }
1216}
1217
1218static const int dnv_dtypes[] = {
1219 DEV_X8, DEV_X4, DEV_X16, DEV_UNKNOWN
1220};
1221
1222static void dnv_get_dimm_config(struct mem_ctl_info *mci)
1223{
1224 int i, j, ranks_of_dimm[DNV_MAX_DIMMS], banks, rowbits, colbits, memtype;
1225 struct dimm_info *dimm;
1226 struct d_cr_drp *d;
1227 u64 capacity;
1228
1229 if (dsch.ddr4en) {
1230 memtype = MEM_DDR4;
1231 banks = 16;
1232 colbits = 10;
1233 } else {
1234 memtype = MEM_DDR3;
1235 banks = 8;
1236 }
1237
1238 for (i = 0; i < DNV_NUM_CHANNELS; i++) {
1239 if (dmap4[i].row14 == 31)
1240 rowbits = 14;
1241 else if (dmap4[i].row15 == 31)
1242 rowbits = 15;
1243 else if (dmap4[i].row16 == 31)
1244 rowbits = 16;
1245 else if (dmap4[i].row17 == 31)
1246 rowbits = 17;
1247 else
1248 rowbits = 18;
1249
1250 if (memtype == MEM_DDR3) {
1251 if (dmap1[i].ca11 != 0x3f)
1252 colbits = 12;
1253 else
1254 colbits = 10;
1255 }
1256
1257 d = &drp[i];
1258 /* DIMM0 is present if rank0 and/or rank1 is enabled */
1259 ranks_of_dimm[0] = d->rken0 + d->rken1;
1260 /* DIMM1 is present if rank2 and/or rank3 is enabled */
1261 ranks_of_dimm[1] = d->rken2 + d->rken3;
1262
1263 for (j = 0; j < DNV_MAX_DIMMS; j++) {
1264 if (!ranks_of_dimm[j])
1265 continue;
1266
1267 dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers, i, j, 0);
1268 if (!dimm) {
1269 edac_dbg(0, "No allocated DIMM for channel %d DIMM %d\n", i, j);
1270 continue;
1271 }
1272
1273 capacity = ranks_of_dimm[j] * banks * (1ul << rowbits) * (1ul << colbits);
1274 edac_dbg(0, "Channel %d DIMM %d: %lld MByte DIMM\n", i, j, capacity >> (20 - 3));
1275 dimm->nr_pages = MiB_TO_PAGES(capacity >> (20 - 3));
1276 dimm->grain = 32;
1277			dimm->dtype = dnv_dtypes[j ? d->dimmdwid1 : d->dimmdwid0];
1278 dimm->mtype = memtype;
1279 dimm->edac_mode = EDAC_SECDED;
1280 snprintf(dimm->label, sizeof(dimm->label), "Chan#%d_DIMM#%d", i, j);
1281 }
1282 }
1283}
1284
1285static int pnd2_register_mci(struct mem_ctl_info **ppmci)
1286{
1287 struct edac_mc_layer layers[2];
1288 struct mem_ctl_info *mci;
1289 struct pnd2_pvt *pvt;
1290 int rc;
1291
1292 rc = ops->check_ecc();
1293 if (rc < 0)
1294 return rc;
1295
1296 /* Allocate a new MC control structure */
1297 layers[0].type = EDAC_MC_LAYER_CHANNEL;
1298 layers[0].size = ops->channels;
1299 layers[0].is_virt_csrow = false;
1300 layers[1].type = EDAC_MC_LAYER_SLOT;
1301 layers[1].size = ops->dimms_per_channel;
1302 layers[1].is_virt_csrow = true;
1303 mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
1304 if (!mci)
1305 return -ENOMEM;
1306
1307 pvt = mci->pvt_info;
1308 memset(pvt, 0, sizeof(*pvt));
1309
1310 mci->mod_name = "pnd2_edac.c";
1311 mci->dev_name = ops->name;
1312 mci->ctl_name = "Pondicherry2";
1313
1314 /* Get dimm basic config and the memory layout */
1315 ops->get_dimm_config(mci);
1316
1317 if (edac_mc_add_mc(mci)) {
1318 edac_dbg(0, "MC: failed edac_mc_add_mc()\n");
1319 edac_mc_free(mci);
1320 return -EINVAL;
1321 }
1322
1323 *ppmci = mci;
1324
1325 return 0;
1326}
1327
1328static void pnd2_unregister_mci(struct mem_ctl_info *mci)
1329{
1330 if (unlikely(!mci || !mci->pvt_info)) {
1331 pnd2_printk(KERN_ERR, "Couldn't find mci handler\n");
1332 return;
1333 }
1334
1335 /* Remove MC sysfs nodes */
1336 edac_mc_del_mc(NULL);
1337 edac_dbg(1, "%s: free mci struct\n", mci->ctl_name);
1338 edac_mc_free(mci);
1339}
1340
1341/*
1342 * Callback function registered with core kernel mce code.
1343 * Called once for each logged error.
1344 */
1345static int pnd2_mce_check_error(struct notifier_block *nb, unsigned long val, void *data)
1346{
1347 struct mce *mce = (struct mce *)data;
1348 struct mem_ctl_info *mci;
1349 struct dram_addr daddr;
1350 char *type;
1351
1352 if (get_edac_report_status() == EDAC_REPORTING_DISABLED)
1353 return NOTIFY_DONE;
1354
1355 mci = pnd2_mci;
1356 if (!mci)
1357 return NOTIFY_DONE;
1358
1359 /*
1360 * Just let mcelog handle it if the error is
1361 * outside the memory controller. A memory error
1362	 * is indicated by bit 7 = 1 and bits 8-11,13-15 = 0.
1363	 * Bit 12 has a special meaning.
1364 */
1365 if ((mce->status & 0xefff) >> 7 != 1)
1366 return NOTIFY_DONE;
1367
1368 if (mce->mcgstatus & MCG_STATUS_MCIP)
1369 type = "Exception";
1370 else
1371 type = "Event";
1372
1373 pnd2_mc_printk(mci, KERN_INFO, "HANDLING MCE MEMORY ERROR\n");
1374 pnd2_mc_printk(mci, KERN_INFO, "CPU %u: Machine Check %s: %llx Bank %u: %llx\n",
1375 mce->extcpu, type, mce->mcgstatus, mce->bank, mce->status);
1376 pnd2_mc_printk(mci, KERN_INFO, "TSC %llx ", mce->tsc);
1377 pnd2_mc_printk(mci, KERN_INFO, "ADDR %llx ", mce->addr);
1378 pnd2_mc_printk(mci, KERN_INFO, "MISC %llx ", mce->misc);
1379 pnd2_mc_printk(mci, KERN_INFO, "PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x\n",
1380 mce->cpuvendor, mce->cpuid, mce->time, mce->socketid, mce->apicid);
1381
1382 pnd2_mce_output_error(mci, mce, &daddr);
1383
1384	/* Advise mcelog that the error was handled */
1385 return NOTIFY_STOP;
1386}
1387
1388static struct notifier_block pnd2_mce_dec = {
1389 .notifier_call = pnd2_mce_check_error,
1390};
1391
1392#ifdef CONFIG_EDAC_DEBUG
1393/*
1394 * Write an address to this file to exercise the address decode
1395 * logic in this driver.
1396 */
1397static u64 pnd2_fake_addr;
1398#define PND2_BLOB_SIZE 1024
1399static char pnd2_result[PND2_BLOB_SIZE];
1400static struct dentry *pnd2_test;
1401static struct debugfs_blob_wrapper pnd2_blob = {
1402 .data = pnd2_result,
1403 .size = 0
1404};
1405
1406static int debugfs_u64_set(void *data, u64 val)
1407{
1408 struct dram_addr daddr;
1409 struct mce m;
1410
1411 *(u64 *)data = val;
1412 m.mcgstatus = 0;
1413 /* ADDRV + MemRd + Unknown channel */
1414 m.status = MCI_STATUS_ADDRV + 0x9f;
1415 m.addr = val;
1416 pnd2_mce_output_error(pnd2_mci, &m, &daddr);
1417 snprintf(pnd2_blob.data, PND2_BLOB_SIZE,
1418 "SysAddr=%llx Channel=%d DIMM=%d Rank=%d Bank=%d Row=%d Column=%d\n",
1419 m.addr, daddr.chan, daddr.dimm, daddr.rank, daddr.bank, daddr.row, daddr.col);
1420 pnd2_blob.size = strlen(pnd2_blob.data);
1421
1422 return 0;
1423}
1424DEFINE_DEBUGFS_ATTRIBUTE(fops_u64_wo, NULL, debugfs_u64_set, "%llu\n");
1425
1426static void setup_pnd2_debug(void)
1427{
1428 pnd2_test = edac_debugfs_create_dir("pnd2_test");
1429 edac_debugfs_create_file("pnd2_debug_addr", 0200, pnd2_test,
1430 &pnd2_fake_addr, &fops_u64_wo);
1431 debugfs_create_blob("pnd2_debug_results", 0400, pnd2_test, &pnd2_blob);
1432}
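/*
 * Typical usage from userspace - the exact path assumes debugfs mounted at
 * /sys/kernel/debug and the EDAC debugfs root named "edac":
 *   echo 0x12345678 > /sys/kernel/debug/edac/pnd2_test/pnd2_debug_addr
 *   cat /sys/kernel/debug/edac/pnd2_test/pnd2_debug_results
 */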
1433
1434static void teardown_pnd2_debug(void)
1435{
1436 debugfs_remove_recursive(pnd2_test);
1437}
1438#else
1439static void setup_pnd2_debug(void) {}
1440static void teardown_pnd2_debug(void) {}
1441#endif /* CONFIG_EDAC_DEBUG */
1442
1443
1444static int pnd2_probe(void)
1445{
1446 int rc;
1447
1448 edac_dbg(2, "\n");
1449 rc = get_registers();
1450 if (rc)
1451 return rc;
1452
1453 return pnd2_register_mci(&pnd2_mci);
1454}
1455
1456static void pnd2_remove(void)
1457{
1458 edac_dbg(0, "\n");
1459 pnd2_unregister_mci(pnd2_mci);
1460}
1461
1462static struct dunit_ops apl_ops = {
1463 .name = "pnd2/apl",
1464 .type = APL,
1465 .pmiaddr_shift = LOG2_PMI_ADDR_GRANULARITY,
1466 .pmiidx_shift = 0,
1467 .channels = APL_NUM_CHANNELS,
1468 .dimms_per_channel = 1,
1469 .rd_reg = apl_rd_reg,
1470 .get_registers = apl_get_registers,
1471 .check_ecc = apl_check_ecc_active,
1472 .mk_region = apl_mk_region,
1473 .get_dimm_config = apl_get_dimm_config,
1474 .pmi2mem = apl_pmi2mem,
1475};
1476
1477static struct dunit_ops dnv_ops = {
1478 .name = "pnd2/dnv",
1479 .type = DNV,
1480 .pmiaddr_shift = 0,
1481 .pmiidx_shift = 1,
1482 .channels = DNV_NUM_CHANNELS,
1483 .dimms_per_channel = 2,
1484 .rd_reg = dnv_rd_reg,
1485 .get_registers = dnv_get_registers,
1486 .check_ecc = dnv_check_ecc_active,
1487 .mk_region = dnv_mk_region,
1488 .get_dimm_config = dnv_get_dimm_config,
1489 .pmi2mem = dnv_pmi2mem,
1490};
1491
1492static const struct x86_cpu_id pnd2_cpuids[] = {
1493 { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GOLDMONT, 0, (kernel_ulong_t)&apl_ops },
1494 { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_DENVERTON, 0, (kernel_ulong_t)&dnv_ops },
1495 { }
1496};
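/* driver_data carries the matching dunit_ops table; pnd2_init() picks it up via x86_match_cpu() */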
1497MODULE_DEVICE_TABLE(x86cpu, pnd2_cpuids);
1498
1499static int __init pnd2_init(void)
1500{
1501 const struct x86_cpu_id *id;
1502 int rc;
1503
1504 edac_dbg(2, "\n");
1505
1506 id = x86_match_cpu(pnd2_cpuids);
1507 if (!id)
1508 return -ENODEV;
1509
1510 ops = (struct dunit_ops *)id->driver_data;
1511
1512 /* Ensure that the OPSTATE is set correctly for POLL or NMI */
1513 opstate_init();
1514
1515 rc = pnd2_probe();
1516 if (rc < 0) {
1517 pnd2_printk(KERN_ERR, "Failed to register device with error %d.\n", rc);
1518 return rc;
1519 }
1520
1521 if (!pnd2_mci)
1522 return -ENODEV;
1523
1524 mce_register_decode_chain(&pnd2_mce_dec);
1525 setup_pnd2_debug();
1526
1527 return 0;
1528}
1529
1530static void __exit pnd2_exit(void)
1531{
1532 edac_dbg(2, "\n");
1533 teardown_pnd2_debug();
1534 mce_unregister_decode_chain(&pnd2_mce_dec);
1535 pnd2_remove();
1536}
1537
1538module_init(pnd2_init);
1539module_exit(pnd2_exit);
1540
1541module_param(edac_op_state, int, 0444);
1542MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
1543
1544MODULE_LICENSE("GPL v2");
1545MODULE_AUTHOR("Tony Luck");
1546MODULE_DESCRIPTION("MC Driver for Intel SoC using Pondicherry memory controller");
diff --git a/drivers/edac/pnd2_edac.h b/drivers/edac/pnd2_edac.h
new file mode 100644
index 000000000000..61b6e79492bb
--- /dev/null
+++ b/drivers/edac/pnd2_edac.h
@@ -0,0 +1,301 @@
1/*
2 * Register bitfield descriptions for Pondicherry2 memory controller.
3 *
4 * Copyright (c) 2016, Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 */
15
16#ifndef _PND2_REGS_H
17#define _PND2_REGS_H
18
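/*
 * Layout of this header: each register gets a C bitfield struct plus a
 * (port, offset, r_opcode) triple identifying it for the sideband register
 * read path in pnd2_edac.c. TOUUD/TOLUD below are the usual Intel
 * "top of upper/low usable DRAM" boundaries.
 */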
19struct b_cr_touud_lo_pci {
20 u32 lock : 1;
21 u32 reserved_1 : 19;
22 u32 touud : 12;
23};
24
25#define b_cr_touud_lo_pci_port 0x4c
26#define b_cr_touud_lo_pci_offset 0xa8
27#define b_cr_touud_lo_pci_r_opcode 0x04
28
29struct b_cr_touud_hi_pci {
30 u32 touud : 7;
31 u32 reserved_0 : 25;
32};
33
34#define b_cr_touud_hi_pci_port 0x4c
35#define b_cr_touud_hi_pci_offset 0xac
36#define b_cr_touud_hi_pci_r_opcode 0x04
37
38struct b_cr_tolud_pci {
39 u32 lock : 1;
40 u32 reserved_0 : 19;
41 u32 tolud : 12;
42};
43
44#define b_cr_tolud_pci_port 0x4c
45#define b_cr_tolud_pci_offset 0xbc
46#define b_cr_tolud_pci_r_opcode 0x04
47
48struct b_cr_mchbar_lo_pci {
49 u32 enable : 1;
50 u32 pad_3_1 : 3;
51 u32 pad_14_4: 11;
52 u32 base: 17;
53};
54
55struct b_cr_mchbar_hi_pci {
56 u32 base : 7;
57 u32 pad_31_7 : 25;
58};
59
60/* Symmetric region */
61struct b_cr_slice_channel_hash {
62 u64 slice_1_disabled : 1;
63 u64 hvm_mode : 1;
64 u64 interleave_mode : 2;
65 u64 slice_0_mem_disabled : 1;
66 u64 reserved_0 : 1;
67 u64 slice_hash_mask : 14;
68 u64 reserved_1 : 11;
69 u64 enable_pmi_dual_data_mode : 1;
70 u64 ch_1_disabled : 1;
71 u64 reserved_2 : 1;
72 u64 sym_slice0_channel_enabled : 2;
73 u64 sym_slice1_channel_enabled : 2;
74 u64 ch_hash_mask : 14;
75 u64 reserved_3 : 11;
76 u64 lock : 1;
77};
78
79#define b_cr_slice_channel_hash_port 0x4c
80#define b_cr_slice_channel_hash_offset 0x4c58
81#define b_cr_slice_channel_hash_r_opcode 0x06
82
83struct b_cr_mot_out_base_mchbar {
84 u32 reserved_0 : 14;
85 u32 mot_out_base : 15;
86 u32 reserved_1 : 1;
87 u32 tr_en : 1;
88 u32 imr_en : 1;
89};
90
91#define b_cr_mot_out_base_mchbar_port 0x4c
92#define b_cr_mot_out_base_mchbar_offset 0x6af0
93#define b_cr_mot_out_base_mchbar_r_opcode 0x00
94
95struct b_cr_mot_out_mask_mchbar {
96 u32 reserved_0 : 14;
97 u32 mot_out_mask : 15;
98 u32 reserved_1 : 1;
99 u32 ia_iwb_en : 1;
100 u32 gt_iwb_en : 1;
101};
102
103#define b_cr_mot_out_mask_mchbar_port 0x4c
104#define b_cr_mot_out_mask_mchbar_offset 0x6af4
105#define b_cr_mot_out_mask_mchbar_r_opcode 0x00
106
107struct b_cr_asym_mem_region0_mchbar {
108 u32 pad : 4;
109 u32 slice0_asym_base : 11;
110 u32 pad_18_15 : 4;
111 u32 slice0_asym_limit : 11;
112 u32 slice0_asym_channel_select : 1;
113 u32 slice0_asym_enable : 1;
114};
115
116#define b_cr_asym_mem_region0_mchbar_port 0x4c
117#define b_cr_asym_mem_region0_mchbar_offset 0x6e40
118#define b_cr_asym_mem_region0_mchbar_r_opcode 0x00
119
120struct b_cr_asym_mem_region1_mchbar {
121 u32 pad : 4;
122 u32 slice1_asym_base : 11;
123 u32 pad_18_15 : 4;
124 u32 slice1_asym_limit : 11;
125 u32 slice1_asym_channel_select : 1;
126 u32 slice1_asym_enable : 1;
127};
128
129#define b_cr_asym_mem_region1_mchbar_port 0x4c
130#define b_cr_asym_mem_region1_mchbar_offset 0x6e44
131#define b_cr_asym_mem_region1_mchbar_r_opcode 0x00
132
133/* Some bit fields moved in the above two structs on Denverton */
134struct b_cr_asym_mem_region_denverton {
135 u32 pad : 4;
136 u32 slice_asym_base : 8;
137 u32 pad_19_12 : 8;
138 u32 slice_asym_limit : 8;
139 u32 pad_28_30 : 3;
140 u32 slice_asym_enable : 1;
141};
142
143struct b_cr_asym_2way_mem_region_mchbar {
144 u32 pad : 2;
145 u32 asym_2way_intlv_mode : 2;
146 u32 asym_2way_base : 11;
147 u32 pad_16_15 : 2;
148 u32 asym_2way_limit : 11;
149 u32 pad_30_28 : 3;
150 u32 asym_2way_interleave_enable : 1;
151};
152
153#define b_cr_asym_2way_mem_region_mchbar_port 0x4c
154#define b_cr_asym_2way_mem_region_mchbar_offset 0x6e50
155#define b_cr_asym_2way_mem_region_mchbar_r_opcode 0x00
156
157/* Apollo Lake d-unit */
158
159struct d_cr_drp0 {
160 u32 rken0 : 1;
161 u32 rken1 : 1;
162 u32 ddmen : 1;
163 u32 rsvd3 : 1;
164 u32 dwid : 2;
165 u32 dden : 3;
166 u32 rsvd13_9 : 5;
167 u32 rsien : 1;
168 u32 bahen : 1;
169 u32 rsvd18_16 : 3;
170 u32 caswizzle : 2;
171 u32 eccen : 1;
172 u32 dramtype : 3;
173 u32 blmode : 3;
174 u32 addrdec : 2;
175 u32 dramdevice_pr : 2;
176};
177
178#define d_cr_drp0_offset 0x1400
179#define d_cr_drp0_r_opcode 0x00
180
181/* Denverton d-unit */
182
183struct d_cr_dsch {
184 u32 ch0en : 1;
185 u32 ch1en : 1;
186 u32 ddr4en : 1;
187 u32 coldwake : 1;
188 u32 newbypdis : 1;
189 u32 chan_width : 1;
190 u32 rsvd6_6 : 1;
191 u32 ooodis : 1;
192 u32 rsvd18_8 : 11;
193 u32 ic : 1;
194 u32 rsvd31_20 : 12;
195};
196
197#define d_cr_dsch_port 0x16
198#define d_cr_dsch_offset 0x0
199#define d_cr_dsch_r_opcode 0x0
200
201struct d_cr_ecc_ctrl {
202 u32 eccen : 1;
203 u32 rsvd31_1 : 31;
204};
205
206#define d_cr_ecc_ctrl_offset 0x180
207#define d_cr_ecc_ctrl_r_opcode 0x0
208
209struct d_cr_drp {
210 u32 rken0 : 1;
211 u32 rken1 : 1;
212 u32 rken2 : 1;
213 u32 rken3 : 1;
214 u32 dimmdwid0 : 2;
215 u32 dimmdden0 : 2;
216 u32 dimmdwid1 : 2;
217 u32 dimmdden1 : 2;
218 u32 rsvd15_12 : 4;
219 u32 dimmflip : 1;
220 u32 rsvd31_17 : 15;
221};
222
223#define d_cr_drp_offset 0x158
224#define d_cr_drp_r_opcode 0x0
225
226struct d_cr_dmap {
227 u32 ba0 : 5;
228 u32 ba1 : 5;
229 u32 bg0 : 5; /* if ddr3, ba2 = bg0 */
230 u32 bg1 : 5; /* if ddr3, ba3 = bg1 */
231 u32 rs0 : 5;
232 u32 rs1 : 5;
233 u32 rsvd : 2;
234};
235
236#define d_cr_dmap_offset 0x174
237#define d_cr_dmap_r_opcode 0x0
238
239struct d_cr_dmap1 {
240 u32 ca11 : 6;
241 u32 bxor : 1;
242 u32 rsvd : 25;
243};
244
245#define d_cr_dmap1_offset 0xb4
246#define d_cr_dmap1_r_opcode 0x0
247
248struct d_cr_dmap2 {
249 u32 row0 : 5;
250 u32 row1 : 5;
251 u32 row2 : 5;
252 u32 row3 : 5;
253 u32 row4 : 5;
254 u32 row5 : 5;
255 u32 rsvd : 2;
256};
257
258#define d_cr_dmap2_offset 0x148
259#define d_cr_dmap2_r_opcode 0x0
260
261struct d_cr_dmap3 {
262 u32 row6 : 5;
263 u32 row7 : 5;
264 u32 row8 : 5;
265 u32 row9 : 5;
266 u32 row10 : 5;
267 u32 row11 : 5;
268 u32 rsvd : 2;
269};
270
271#define d_cr_dmap3_offset 0x14c
272#define d_cr_dmap3_r_opcode 0x0
273
274struct d_cr_dmap4 {
275 u32 row12 : 5;
276 u32 row13 : 5;
277 u32 row14 : 5;
278 u32 row15 : 5;
279 u32 row16 : 5;
280 u32 row17 : 5;
281 u32 rsvd : 2;
282};
283
284#define d_cr_dmap4_offset 0x150
285#define d_cr_dmap4_r_opcode 0x0
286
287struct d_cr_dmap5 {
288 u32 ca3 : 4;
289 u32 ca4 : 4;
290 u32 ca5 : 4;
291 u32 ca6 : 4;
292 u32 ca7 : 4;
293 u32 ca8 : 4;
294 u32 ca9 : 4;
295 u32 rsvd : 4;
296};
297
298#define d_cr_dmap5_offset 0x154
299#define d_cr_dmap5_r_opcode 0x0
300
301#endif /* _PND2_REGS_H */
diff --git a/drivers/edac/xgene_edac.c b/drivers/edac/xgene_edac.c
index 6c270d9d304a..669246056812 100644
--- a/drivers/edac/xgene_edac.c
+++ b/drivers/edac/xgene_edac.c
@@ -1596,7 +1596,7 @@ static void xgene_edac_pa_report(struct edac_device_ctl_info *edac_dev)
 	reg = readl(ctx->dev_csr + IOBPATRANSERRINTSTS);
 	if (!reg)
 		goto chk_iob_axi0;
-	dev_err(edac_dev->dev, "IOB procesing agent (PA) transaction error\n");
+	dev_err(edac_dev->dev, "IOB processing agent (PA) transaction error\n");
 	if (reg & IOBPA_RDATA_CORRUPT_MASK)
 		dev_err(edac_dev->dev, "IOB PA read data RAM error\n");
 	if (reg & IOBPA_M_RDATA_CORRUPT_MASK)
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index e7d404059b73..b372aad3b449 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -389,7 +389,6 @@ int __init efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
 			return 0;
 		}
 	}
-	pr_err_once("requested map not found.\n");
 	return -ENOENT;
 }
 
diff --git a/drivers/firmware/efi/esrt.c b/drivers/firmware/efi/esrt.c
index 08b026864d4e..8554d7aec31c 100644
--- a/drivers/firmware/efi/esrt.c
+++ b/drivers/firmware/efi/esrt.c
@@ -254,7 +254,7 @@ void __init efi_esrt_init(void)
 
 	rc = efi_mem_desc_lookup(efi.esrt, &md);
 	if (rc < 0) {
-		pr_err("ESRT header is not in the memory map.\n");
+		pr_warn("ESRT header is not in the memory map.\n");
 		return;
 	}
 
diff --git a/drivers/firmware/efi/libstub/gop.c b/drivers/firmware/efi/libstub/gop.c
index 932742e4cf23..24c461dea7af 100644
--- a/drivers/firmware/efi/libstub/gop.c
+++ b/drivers/firmware/efi/libstub/gop.c
@@ -149,7 +149,8 @@ setup_gop32(efi_system_table_t *sys_table_arg, struct screen_info *si,
 
 		status = __gop_query32(sys_table_arg, gop32, &info, &size,
 				       &current_fb_base);
-		if (status == EFI_SUCCESS && (!first_gop || conout_found)) {
+		if (status == EFI_SUCCESS && (!first_gop || conout_found) &&
+		    info->pixel_format != PIXEL_BLT_ONLY) {
 			/*
 			 * Systems that use the UEFI Console Splitter may
 			 * provide multiple GOP devices, not all of which are
@@ -266,7 +267,8 @@ setup_gop64(efi_system_table_t *sys_table_arg, struct screen_info *si,
 
 		status = __gop_query64(sys_table_arg, gop64, &info, &size,
 				       &current_fb_base);
-		if (status == EFI_SUCCESS && (!first_gop || conout_found)) {
+		if (status == EFI_SUCCESS && (!first_gop || conout_found) &&
+		    info->pixel_format != PIXEL_BLT_ONLY) {
 			/*
 			 * Systems that use the UEFI Console Splitter may
 			 * provide multiple GOP devices, not all of which are
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index 9b37a3692b3f..2bd683e2be02 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -266,6 +266,9 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
 		goto fail_free_event;
 	}
 
+	if (agpio->wake_capable == ACPI_WAKE_CAPABLE)
+		enable_irq_wake(irq);
+
 	list_add_tail(&event->node, &acpi_gpio->events);
 	return AE_OK;
 
@@ -339,6 +342,9 @@ void acpi_gpiochip_free_interrupts(struct gpio_chip *chip)
 	list_for_each_entry_safe_reverse(event, ep, &acpi_gpio->events, node) {
 		struct gpio_desc *desc;
 
+		if (irqd_is_wakeup_set(irq_get_irq_data(event->irq)))
+			disable_irq_wake(event->irq);
+
 		free_irq(event->irq, event);
 		desc = event->desc;
 		if (WARN_ON(IS_ERR(desc)))
@@ -571,8 +577,10 @@ struct gpio_desc *acpi_find_gpio(struct device *dev,
 		}
 
 		desc = acpi_get_gpiod_by_index(adev, propname, idx, &info);
-		if (!IS_ERR(desc) || (PTR_ERR(desc) == -EPROBE_DEFER))
+		if (!IS_ERR(desc))
 			break;
+		if (PTR_ERR(desc) == -EPROBE_DEFER)
+			return ERR_CAST(desc);
 	}
 
 	/* Then from plain _CRS GPIOs */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 262056778f52..6a8129949333 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -32,7 +32,7 @@
 #include <linux/wait.h>
 #include <linux/list.h>
 #include <linux/kref.h>
-#include <linux/interval_tree.h>
+#include <linux/rbtree.h>
 #include <linux/hashtable.h>
 #include <linux/dma-fence.h>
 
@@ -122,14 +122,6 @@ extern int amdgpu_param_buf_per_se;
 /* max number of IP instances */
 #define AMDGPU_MAX_SDMA_INSTANCES		2
 
-/* max number of VMHUB */
-#define AMDGPU_MAX_VMHUBS			2
-#define AMDGPU_MMHUB				0
-#define AMDGPU_GFXHUB				1
-
-/* hardcode that limit for now */
-#define AMDGPU_VA_RESERVED_SIZE			(8 << 20)
-
 /* hard reset data */
 #define AMDGPU_ASIC_RESET_DATA			0x39d5e86b
 
@@ -312,12 +304,9 @@ struct amdgpu_gart_funcs {
 	/* set pte flags based per asic */
 	uint64_t (*get_vm_pte_flags)(struct amdgpu_device *adev,
 				     uint32_t flags);
-};
-
-/* provided by the mc block */
-struct amdgpu_mc_funcs {
 	/* adjust mc addr in fb for APU case */
 	u64 (*adjust_mc_addr)(struct amdgpu_device *adev, u64 addr);
+	uint32_t (*get_invalidate_req)(unsigned int vm_id);
 };
 
 /* provided by the ih block */
@@ -379,7 +368,10 @@ struct amdgpu_bo_list_entry {
 
 struct amdgpu_bo_va_mapping {
 	struct list_head		list;
-	struct interval_tree_node	it;
+	struct rb_node			rb;
+	uint64_t			start;
+	uint64_t			last;
+	uint64_t			__subtree_last;
 	uint64_t			offset;
 	uint64_t			flags;
 };
@@ -579,8 +571,6 @@ struct amdgpu_vmhub {
 	uint32_t	vm_context0_cntl;
 	uint32_t	vm_l2_pro_fault_status;
 	uint32_t	vm_l2_pro_fault_cntl;
-	uint32_t	(*get_invalidate_req)(unsigned int vm_id);
-	uint32_t	(*get_vm_protection_bits)(void);
 };
 
 /*
@@ -618,7 +608,6 @@ struct amdgpu_mc {
 	u64					private_aperture_end;
 	/* protects concurrent invalidation */
 	spinlock_t				invalidate_lock;
-	const struct amdgpu_mc_funcs		*mc_funcs;
 };
 
 /*
@@ -1712,6 +1701,12 @@ void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v);
 #define WREG32_FIELD(reg, field, val)	\
 	WREG32(mm##reg, (RREG32(mm##reg) & ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field))
 
+#define WREG32_FIELD_OFFSET(reg, offset, field, val)	\
+	WREG32(mm##reg + offset, (RREG32(mm##reg + offset) & ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field))
+
+#define WREG32_FIELD15(ip, idx, reg, field, val)	\
+	WREG32(SOC15_REG_OFFSET(ip, idx, mm##reg), (RREG32(SOC15_REG_OFFSET(ip, idx, mm##reg)) & ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field))
+
 /*
  * BIOS helpers.
  */
@@ -1887,12 +1882,14 @@ void amdgpu_unregister_atpx_handler(void);
 bool amdgpu_has_atpx_dgpu_power_cntl(void);
 bool amdgpu_is_atpx_hybrid(void);
 bool amdgpu_atpx_dgpu_req_power_for_displays(void);
+bool amdgpu_has_atpx(void);
 #else
 static inline void amdgpu_register_atpx_handler(void) {}
 static inline void amdgpu_unregister_atpx_handler(void) {}
 static inline bool amdgpu_has_atpx_dgpu_power_cntl(void) { return false; }
 static inline bool amdgpu_is_atpx_hybrid(void) { return false; }
 static inline bool amdgpu_atpx_dgpu_req_power_for_displays(void) { return false; }
+static inline bool amdgpu_has_atpx(void) { return false; }
 #endif
 
 /*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
index f52b1bf3d3d9..ad4329922f79 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
@@ -754,6 +754,35 @@ union igp_info {
 	struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_9 info_9;
 };
 
+/*
+ * Return vram width from integrated system info table, if available,
+ * or 0 if not.
+ */
+int amdgpu_atombios_get_vram_width(struct amdgpu_device *adev)
+{
+	struct amdgpu_mode_info *mode_info = &adev->mode_info;
+	int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
+	u16 data_offset, size;
+	union igp_info *igp_info;
+	u8 frev, crev;
+
+	/* get any igp specific overrides */
+	if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, &size,
+					  &frev, &crev, &data_offset)) {
+		igp_info = (union igp_info *)
+			(mode_info->atom_context->bios + data_offset);
+		switch (crev) {
+		case 8:
+		case 9:
+			return igp_info->info_8.ucUMAChannelNumber * 64;
+		default:
+			return 0;
+		}
+	}
+
+	return 0;
+}
+
 static void amdgpu_atombios_get_igp_ss_overrides(struct amdgpu_device *adev,
 						 struct amdgpu_atom_ss *ss,
 						 int id)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
index 4e0f488487f3..38d0fe32e5cd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
@@ -148,6 +148,8 @@ int amdgpu_atombios_get_clock_info(struct amdgpu_device *adev);
 
 int amdgpu_atombios_get_gfx_info(struct amdgpu_device *adev);
 
+int amdgpu_atombios_get_vram_width(struct amdgpu_device *adev);
+
 bool amdgpu_atombios_get_asic_ss_info(struct amdgpu_device *adev,
 				      struct amdgpu_atom_ss *ss,
 				      int id, u32 clock);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
index 0218cea6be4d..a6649874e6ce 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
@@ -237,7 +237,7 @@ int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
 	struct amdgpu_fpriv *fpriv = filp->driver_priv;
 	union drm_amdgpu_bo_list *args = data;
 	uint32_t handle = args->in.list_handle;
-	const void __user *uptr = (const void*)(long)args->in.bo_info_ptr;
+	const void __user *uptr = (const void*)(uintptr_t)args->in.bo_info_ptr;
 
 	struct drm_amdgpu_bo_list_entry *info;
 	struct amdgpu_bo_list *list;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 97f661372a1c..ec71b9320561 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -161,7 +161,7 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
 	}
 
 	/* get chunks */
-	chunk_array_user = (uint64_t __user *)(unsigned long)(cs->in.chunks);
+	chunk_array_user = (uint64_t __user *)(uintptr_t)(cs->in.chunks);
 	if (copy_from_user(chunk_array, chunk_array_user,
 			   sizeof(uint64_t)*cs->in.num_chunks)) {
 		ret = -EFAULT;
@@ -181,7 +181,7 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
 		struct drm_amdgpu_cs_chunk user_chunk;
 		uint32_t __user *cdata;
 
-		chunk_ptr = (void __user *)(unsigned long)chunk_array[i];
+		chunk_ptr = (void __user *)(uintptr_t)chunk_array[i];
 		if (copy_from_user(&user_chunk, chunk_ptr,
 				   sizeof(struct drm_amdgpu_cs_chunk))) {
 			ret = -EFAULT;
@@ -192,7 +192,7 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
 		p->chunks[i].length_dw = user_chunk.length_dw;
 
 		size = p->chunks[i].length_dw;
-		cdata = (void __user *)(unsigned long)user_chunk.chunk_data;
+		cdata = (void __user *)(uintptr_t)user_chunk.chunk_data;
 
 		p->chunks[i].kdata = drm_malloc_ab(size, sizeof(uint32_t));
 		if (p->chunks[i].kdata == NULL) {
@@ -949,7 +949,7 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
 		}
 
 		if ((chunk_ib->va_start + chunk_ib->ib_bytes) >
-		    (m->it.last + 1) * AMDGPU_GPU_PAGE_SIZE) {
+		    (m->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
 			DRM_ERROR("IB va_start+ib_bytes is invalid\n");
 			return -EINVAL;
 		}
@@ -960,7 +960,7 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
 			return r;
 		}
 
-		offset = ((uint64_t)m->it.start) * AMDGPU_GPU_PAGE_SIZE;
+		offset = m->start * AMDGPU_GPU_PAGE_SIZE;
 		kptr += chunk_ib->va_start - offset;
 
 		r = amdgpu_ib_get(adev, vm, chunk_ib->ib_bytes, ib);
@@ -1242,6 +1242,7 @@ static int amdgpu_cs_wait_all_fences(struct amdgpu_device *adev,
 			continue;
 
 		r = dma_fence_wait_timeout(fence, true, timeout);
+		dma_fence_put(fence);
 		if (r < 0)
 			return r;
 
@@ -1339,7 +1340,7 @@ int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
 	if (fences == NULL)
 		return -ENOMEM;
 
-	fences_user = (void __user *)(unsigned long)(wait->in.fences);
+	fences_user = (void __user *)(uintptr_t)(wait->in.fences);
 	if (copy_from_user(fences, fences_user,
 			   sizeof(struct drm_amdgpu_fence) * fence_count)) {
 		r = -EFAULT;
@@ -1388,8 +1389,8 @@ amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
 			continue;
 
 		list_for_each_entry(mapping, &lobj->bo_va->valids, list) {
-			if (mapping->it.start > addr ||
-			    addr > mapping->it.last)
+			if (mapping->start > addr ||
+			    addr > mapping->last)
 				continue;
 
 			*bo = lobj->bo_va->bo;
@@ -1397,8 +1398,8 @@ amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
 		}
 
 		list_for_each_entry(mapping, &lobj->bo_va->invalids, list) {
-			if (mapping->it.start > addr ||
-			    addr > mapping->it.last)
+			if (mapping->start > addr ||
+			    addr > mapping->last)
 				continue;
 
 			*bo = lobj->bo_va->bo;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 83dda05325b8..483660742f75 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1040,43 +1040,60 @@ static bool amdgpu_check_pot_argument(int arg)
 	return (arg & (arg - 1)) == 0;
 }
 
-static void amdgpu_get_block_size(struct amdgpu_device *adev)
+static void amdgpu_check_block_size(struct amdgpu_device *adev)
 {
-	/* from AI, asic starts to support multiple level VMPT */
-	if (adev->asic_type >= CHIP_VEGA10) {
-		if (amdgpu_vm_block_size != 9)
-			dev_warn(adev->dev,
-				 "Multi-VMPT limits block size to one page!\n");
-		amdgpu_vm_block_size = 9;
-		return;
-	}
 	/* defines number of bits in page table versus page directory,
 	 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
 	 * page table and the remaining bits are in the page directory */
-	if (amdgpu_vm_block_size == -1) {
-
-		/* Total bits covered by PD + PTs */
-		unsigned bits = ilog2(amdgpu_vm_size) + 18;
-
-		/* Make sure the PD is 4K in size up to 8GB address space.
-		   Above that split equal between PD and PTs */
-		if (amdgpu_vm_size <= 8)
-			amdgpu_vm_block_size = bits - 9;
-		else
-			amdgpu_vm_block_size = (bits + 3) / 2;
+	if (amdgpu_vm_block_size == -1)
+		return;
 
-	} else if (amdgpu_vm_block_size < 9) {
+	if (amdgpu_vm_block_size < 9) {
 		dev_warn(adev->dev, "VM page table size (%d) too small\n",
 			 amdgpu_vm_block_size);
-		amdgpu_vm_block_size = 9;
+		goto def_value;
 	}
 
 	if (amdgpu_vm_block_size > 24 ||
 	    (amdgpu_vm_size * 1024) < (1ull << amdgpu_vm_block_size)) {
 		dev_warn(adev->dev, "VM page table size (%d) too large\n",
 			 amdgpu_vm_block_size);
-		amdgpu_vm_block_size = 9;
+		goto def_value;
 	}
+
+	return;
+
+def_value:
+	amdgpu_vm_block_size = -1;
+}
+
+static void amdgpu_check_vm_size(struct amdgpu_device *adev)
+{
+	if (!amdgpu_check_pot_argument(amdgpu_vm_size)) {
+		dev_warn(adev->dev, "VM size (%d) must be a power of 2\n",
+			 amdgpu_vm_size);
+		goto def_value;
+	}
+
+	if (amdgpu_vm_size < 1) {
+		dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
+			 amdgpu_vm_size);
+		goto def_value;
+	}
+
+	/*
+	 * Max GPUVM size for Cayman, SI, CI VI are 40 bits.
+	 */
+	if (amdgpu_vm_size > 1024) {
+		dev_warn(adev->dev, "VM size (%d) too large, max is 1TB\n",
+			 amdgpu_vm_size);
+		goto def_value;
+	}
+
+	return;
+
+def_value:
+	amdgpu_vm_size = -1;
 }
 
 /**
1081 1098
1082/** 1099/**
@@ -1108,28 +1125,9 @@ static void amdgpu_check_arguments(struct amdgpu_device *adev)
 		}
 	}
 
-	if (!amdgpu_check_pot_argument(amdgpu_vm_size)) {
-		dev_warn(adev->dev, "VM size (%d) must be a power of 2\n",
-			 amdgpu_vm_size);
-		amdgpu_vm_size = 8;
-	}
-
-	if (amdgpu_vm_size < 1) {
-		dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
-			 amdgpu_vm_size);
-		amdgpu_vm_size = 8;
-	}
-
-	/*
-	 * Max GPUVM size for Cayman, SI and CI are 40 bits.
-	 */
-	if (amdgpu_vm_size > 1024) {
-		dev_warn(adev->dev, "VM size (%d) too large, max is 1TB\n",
-			 amdgpu_vm_size);
-		amdgpu_vm_size = 8;
-	}
-
-	amdgpu_get_block_size(adev);
+	amdgpu_check_vm_size(adev);
+
+	amdgpu_check_block_size(adev);
 
 	if (amdgpu_vram_page_split != -1 && (amdgpu_vram_page_split < 16 ||
 	    !amdgpu_check_pot_argument(amdgpu_vram_page_split))) {
@@ -2249,9 +2247,10 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
 	}
 
 	r = amdgpu_resume(adev);
-	if (r)
+	if (r) {
 		DRM_ERROR("amdgpu_resume failed (%d).\n", r);
-
+		return r;
+	}
 	amdgpu_fence_driver_resume(adev);
 
 	if (resume) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index ce15721cadda..96926a221bd5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -614,6 +614,12 @@ amdgpu_user_framebuffer_create(struct drm_device *dev,
 		return ERR_PTR(-ENOENT);
 	}
 
+	/* Handle is imported dma-buf, so cannot be migrated to VRAM for scanout */
+	if (obj->import_attach) {
+		DRM_DEBUG_KMS("Cannot create framebuffer from imported dma_buf\n");
+		return ERR_PTR(-EINVAL);
+	}
+
 	amdgpu_fb = kzalloc(sizeof(*amdgpu_fb), GFP_KERNEL);
 	if (amdgpu_fb == NULL) {
 		drm_gem_object_unreference_unlocked(obj);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 400917fd7486..4e0f7d2d87f1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -86,7 +86,7 @@ int amdgpu_runtime_pm = -1;
 unsigned amdgpu_ip_block_mask = 0xffffffff;
 int amdgpu_bapm = -1;
 int amdgpu_deep_color = 0;
-int amdgpu_vm_size = 64;
+int amdgpu_vm_size = -1;
 int amdgpu_vm_block_size = -1;
 int amdgpu_vm_fault_stop = 0;
 int amdgpu_vm_debug = 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index f85520d4e711..03a9c5cad222 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -717,7 +717,7 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
 	switch (args->op) {
 	case AMDGPU_GEM_OP_GET_GEM_CREATE_INFO: {
 		struct drm_amdgpu_gem_create_in info;
-		void __user *out = (void __user *)(long)args->value;
+		void __user *out = (void __user *)(uintptr_t)args->value;
 
 		info.bo_size = robj->gem_base.size;
 		info.alignment = robj->tbo.mem.page_alignment << PAGE_SHIFT;
@@ -729,6 +729,11 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
 		break;
 	}
 	case AMDGPU_GEM_OP_SET_PLACEMENT:
+		if (robj->prime_shared_count && (args->value & AMDGPU_GEM_DOMAIN_VRAM)) {
+			r = -EINVAL;
+			amdgpu_bo_unreserve(robj);
+			break;
+		}
 		if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm)) {
 			r = -EPERM;
 			amdgpu_bo_unreserve(robj);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index 13b487235a8b..a6b7e367a860 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -316,9 +316,10 @@ int amdgpu_irq_add_id(struct amdgpu_device *adev,
 		return -EINVAL;
 
 	if (!adev->irq.client[client_id].sources) {
-		adev->irq.client[client_id].sources = kcalloc(AMDGPU_MAX_IRQ_SRC_ID,
-							      sizeof(struct amdgpu_irq_src),
-							      GFP_KERNEL);
+		adev->irq.client[client_id].sources =
+			kcalloc(AMDGPU_MAX_IRQ_SRC_ID,
+				sizeof(struct amdgpu_irq_src *),
+				GFP_KERNEL);
 		if (!adev->irq.client[client_id].sources)
 			return -ENOMEM;
 	}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index dfb029ab3448..832be632478f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -36,12 +36,6 @@
 #include <linux/pm_runtime.h>
 #include "amdgpu_amdkfd.h"
 
-#if defined(CONFIG_VGA_SWITCHEROO)
-bool amdgpu_has_atpx(void);
-#else
-static inline bool amdgpu_has_atpx(void) { return false; }
-#endif
-
 /**
  * amdgpu_driver_unload_kms - Main unload function for KMS.
  *
@@ -243,7 +237,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 	struct amdgpu_device *adev = dev->dev_private;
 	struct drm_amdgpu_info *info = data;
 	struct amdgpu_mode_info *minfo = &adev->mode_info;
-	void __user *out = (void __user *)(long)info->return_pointer;
+	void __user *out = (void __user *)(uintptr_t)info->return_pointer;
 	uint32_t size = info->return_size;
 	struct drm_crtc *crtc;
 	uint32_t ui32 = 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
index 7ea3cacf9f9f..38f739fb727b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
@@ -31,6 +31,7 @@
 #include <linux/firmware.h>
 #include <linux/module.h>
 #include <linux/mmu_notifier.h>
+#include <linux/interval_tree.h>
 #include <drm/drmP.h>
 #include <drm/drm.h>
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 5aac350b007f..cb89fff863c0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -122,20 +122,19 @@ static void amdgpu_ttm_placement_init(struct amdgpu_device *adev,
 
 	if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
 		unsigned visible_pfn = adev->mc.visible_vram_size >> PAGE_SHIFT;
-		unsigned lpfn = 0;
-
-		/* This forces a reallocation if the flag wasn't set before */
-		if (flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
-			lpfn = adev->mc.real_vram_size >> PAGE_SHIFT;
 
 		places[c].fpfn = 0;
-		places[c].lpfn = lpfn;
+		places[c].lpfn = 0;
 		places[c].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
 				  TTM_PL_FLAG_VRAM;
+
 		if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
 			places[c].lpfn = visible_pfn;
 		else
 			places[c].flags |= TTM_PL_FLAG_TOPDOWN;
+
+		if (flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
+			places[c].flags |= TTM_PL_FLAG_CONTIGUOUS;
 		c++;
 	}
 
@@ -651,6 +650,10 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
 	if (WARN_ON_ONCE(min_offset > max_offset))
 		return -EINVAL;
 
+	/* A shared bo cannot be migrated to VRAM */
+	if (bo->prime_shared_count && (domain == AMDGPU_GEM_DOMAIN_VRAM))
+		return -EINVAL;
+
 	if (bo->pin_count) {
 		uint32_t mem_type = bo->tbo.mem.mem_type;
 
@@ -928,8 +931,7 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
928 size = bo->mem.num_pages << PAGE_SHIFT; 931 size = bo->mem.num_pages << PAGE_SHIFT;
929 offset = bo->mem.start << PAGE_SHIFT; 932 offset = bo->mem.start << PAGE_SHIFT;
930 /* TODO: figure out how to map scattered VRAM to the CPU */ 933 /* TODO: figure out how to map scattered VRAM to the CPU */
931 if ((offset + size) <= adev->mc.visible_vram_size && 934 if ((offset + size) <= adev->mc.visible_vram_size)
932 (abo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS))
933 return 0; 935 return 0;
934 936
935 /* Can't move a pinned BO to visible VRAM */ 937 /* Can't move a pinned BO to visible VRAM */
@@ -937,7 +939,6 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
937 return -EINVAL; 939 return -EINVAL;
938 940
939 /* hurrah the memory is not visible ! */ 941 /* hurrah the memory is not visible ! */
940 abo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
941 amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM); 942 amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM);
942 lpfn = adev->mc.visible_vram_size >> PAGE_SHIFT; 943 lpfn = adev->mc.visible_vram_size >> PAGE_SHIFT;
943 for (i = 0; i < abo->placement.num_placement; i++) { 944 for (i = 0; i < abo->placement.num_placement; i++) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 4731015f6101..ed6e5799016e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -130,7 +130,7 @@ psp_cmd_submit_buf(struct psp_context *psp,
130 130
131 while (*((unsigned int *)psp->fence_buf) != index) { 131 while (*((unsigned int *)psp->fence_buf) != index) {
132 msleep(1); 132 msleep(1);
133 }; 133 }
134 134
135 amdgpu_bo_free_kernel(&cmd_buf_bo, 135 amdgpu_bo_free_kernel(&cmd_buf_bo,
136 &cmd_buf_mc_addr, 136 &cmd_buf_mc_addr,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index a87de18160a8..ee9d0f346d75 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -226,8 +226,8 @@ TRACE_EVENT(amdgpu_vm_bo_map,
226 226
227 TP_fast_assign( 227 TP_fast_assign(
228 __entry->bo = bo_va ? bo_va->bo : NULL; 228 __entry->bo = bo_va ? bo_va->bo : NULL;
229 __entry->start = mapping->it.start; 229 __entry->start = mapping->start;
230 __entry->last = mapping->it.last; 230 __entry->last = mapping->last;
231 __entry->offset = mapping->offset; 231 __entry->offset = mapping->offset;
232 __entry->flags = mapping->flags; 232 __entry->flags = mapping->flags;
233 ), 233 ),
@@ -250,8 +250,8 @@ TRACE_EVENT(amdgpu_vm_bo_unmap,
250 250
251 TP_fast_assign( 251 TP_fast_assign(
252 __entry->bo = bo_va->bo; 252 __entry->bo = bo_va->bo;
253 __entry->start = mapping->it.start; 253 __entry->start = mapping->start;
254 __entry->last = mapping->it.last; 254 __entry->last = mapping->last;
255 __entry->offset = mapping->offset; 255 __entry->offset = mapping->offset;
256 __entry->flags = mapping->flags; 256 __entry->flags = mapping->flags;
257 ), 257 ),
@@ -270,8 +270,8 @@ DECLARE_EVENT_CLASS(amdgpu_vm_mapping,
270 ), 270 ),
271 271
272 TP_fast_assign( 272 TP_fast_assign(
273 __entry->soffset = mapping->it.start; 273 __entry->soffset = mapping->start;
274 __entry->eoffset = mapping->it.last + 1; 274 __entry->eoffset = mapping->last + 1;
275 __entry->flags = mapping->flags; 275 __entry->flags = mapping->flags;
276 ), 276 ),
277 TP_printk("soffs=%010llx, eoffs=%010llx, flags=%08x", 277 TP_printk("soffs=%010llx, eoffs=%010llx, flags=%08x",
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 244bb9aacf86..35d53a0d9ba6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -529,40 +529,12 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_
529 case TTM_PL_TT: 529 case TTM_PL_TT:
530 break; 530 break;
531 case TTM_PL_VRAM: 531 case TTM_PL_VRAM:
532 if (mem->start == AMDGPU_BO_INVALID_OFFSET)
533 return -EINVAL;
534
535 mem->bus.offset = mem->start << PAGE_SHIFT; 532 mem->bus.offset = mem->start << PAGE_SHIFT;
536 /* check if it's visible */ 533 /* check if it's visible */
537 if ((mem->bus.offset + mem->bus.size) > adev->mc.visible_vram_size) 534 if ((mem->bus.offset + mem->bus.size) > adev->mc.visible_vram_size)
538 return -EINVAL; 535 return -EINVAL;
539 mem->bus.base = adev->mc.aper_base; 536 mem->bus.base = adev->mc.aper_base;
540 mem->bus.is_iomem = true; 537 mem->bus.is_iomem = true;
541#ifdef __alpha__
542 /*
543 * Alpha: use bus.addr to hold the ioremap() return,
544 * so we can modify bus.base below.
545 */
546 if (mem->placement & TTM_PL_FLAG_WC)
547 mem->bus.addr =
548 ioremap_wc(mem->bus.base + mem->bus.offset,
549 mem->bus.size);
550 else
551 mem->bus.addr =
552 ioremap_nocache(mem->bus.base + mem->bus.offset,
553 mem->bus.size);
554 if (!mem->bus.addr)
555 return -ENOMEM;
556
557 /*
558 * Alpha: Use just the bus offset plus
559 * the hose/domain memory base for bus.base.
560 * It then can be used to build PTEs for VRAM
561 * access, as done in ttm_bo_vm_fault().
562 */
563 mem->bus.base = (mem->bus.base & 0x0ffffffffUL) +
564 adev->ddev->hose->dense_mem_base;
565#endif
566 break; 538 break;
567 default: 539 default:
568 return -EINVAL; 540 return -EINVAL;
@@ -574,6 +546,18 @@ static void amdgpu_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_re
574{ 546{
575} 547}
576 548
549static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
550 unsigned long page_offset)
551{
552 struct drm_mm_node *mm = bo->mem.mm_node;
553 uint64_t size = mm->size;
554 uint64_t offset = page_offset;
555
556 page_offset = do_div(offset, size);
557 mm += offset;
558 return (bo->mem.bus.base >> PAGE_SHIFT) + mm->start + page_offset;
559}
560
577/* 561/*
578 * TTM backend functions. 562 * TTM backend functions.
579 */ 563 */
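The new amdgpu_ttm_io_mem_pfn() helper relies on all drm_mm nodes of a buffer having the same size (mm->size is read from the first node), so a single division yields both the node index and the page offset inside that node; do_div() stores the quotient in place and returns the remainder. A rough userspace model, with an illustrative node layout:

	#include <stdint.h>
	#include <stdio.h>

	struct mm_node { uint64_t start, size; };	/* in pages */

	/* assumes equally sized nodes, as the kernel helper does */
	static uint64_t io_mem_pfn(const struct mm_node *nodes,
				   uint64_t bus_base_pfn, uint64_t page_offset)
	{
		uint64_t idx = page_offset / nodes[0].size;	/* which node */
		uint64_t rem = page_offset % nodes[0].size;	/* page inside it */

		return bus_base_pfn + nodes[idx].start + rem;
	}

	int main(void)
	{
		struct mm_node nodes[] = { { 4096, 512 }, { 128, 512 } };

		/* page 600 of the BO lives in the second node, 88 pages in */
		printf("pfn = %llu\n",
		       (unsigned long long)io_mem_pfn(nodes, 0x100000, 600));
		return 0;
	}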
@@ -1089,6 +1073,7 @@ static struct ttm_bo_driver amdgpu_bo_driver = {
1089 .fault_reserve_notify = &amdgpu_bo_fault_reserve_notify, 1073 .fault_reserve_notify = &amdgpu_bo_fault_reserve_notify,
1090 .io_mem_reserve = &amdgpu_ttm_io_mem_reserve, 1074 .io_mem_reserve = &amdgpu_ttm_io_mem_reserve,
1091 .io_mem_free = &amdgpu_ttm_io_mem_free, 1075 .io_mem_free = &amdgpu_ttm_io_mem_free,
1076 .io_mem_pfn = amdgpu_ttm_io_mem_pfn,
1092}; 1077};
1093 1078
1094int amdgpu_ttm_init(struct amdgpu_device *adev) 1079int amdgpu_ttm_init(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 0b92dd0c1d70..2ca09f111f08 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -741,10 +741,10 @@ static int amdgpu_uvd_cs_pass2(struct amdgpu_uvd_cs_ctx *ctx)
741 741
742 start = amdgpu_bo_gpu_offset(bo); 742 start = amdgpu_bo_gpu_offset(bo);
743 743
744 end = (mapping->it.last + 1 - mapping->it.start); 744 end = (mapping->last + 1 - mapping->start);
745 end = end * AMDGPU_GPU_PAGE_SIZE + start; 745 end = end * AMDGPU_GPU_PAGE_SIZE + start;
746 746
747 addr -= ((uint64_t)mapping->it.start) * AMDGPU_GPU_PAGE_SIZE; 747 addr -= mapping->start * AMDGPU_GPU_PAGE_SIZE;
748 start += addr; 748 start += addr;
749 749
750 amdgpu_set_ib_value(ctx->parser, ctx->ib_idx, ctx->data0, 750 amdgpu_set_ib_value(ctx->parser, ctx->ib_idx, ctx->data0,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 0184197eb000..c853400805d1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -595,13 +595,13 @@ static int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, uint32_t ib_idx,
595 } 595 }
596 596
597 if ((addr + (uint64_t)size) > 597 if ((addr + (uint64_t)size) >
598 ((uint64_t)mapping->it.last + 1) * AMDGPU_GPU_PAGE_SIZE) { 598 (mapping->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
599 DRM_ERROR("BO too small for addr 0x%010Lx %d %d\n", 599 DRM_ERROR("BO too small for addr 0x%010Lx %d %d\n",
600 addr, lo, hi); 600 addr, lo, hi);
601 return -EINVAL; 601 return -EINVAL;
602 } 602 }
603 603
604 addr -= ((uint64_t)mapping->it.start) * AMDGPU_GPU_PAGE_SIZE; 604 addr -= mapping->start * AMDGPU_GPU_PAGE_SIZE;
605 addr += amdgpu_bo_gpu_offset(bo); 605 addr += amdgpu_bo_gpu_offset(bo);
606 addr -= ((uint64_t)size) * ((uint64_t)index); 606 addr -= ((uint64_t)size) * ((uint64_t)index);
607 607
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index ecef35a1fe33..ba8b8ae6234f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -122,9 +122,7 @@ uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
122 122
123 mutex_lock(&adev->virt.lock_kiq); 123 mutex_lock(&adev->virt.lock_kiq);
124 amdgpu_ring_alloc(ring, 32); 124 amdgpu_ring_alloc(ring, 32);
125 amdgpu_ring_emit_hdp_flush(ring);
126 amdgpu_ring_emit_rreg(ring, reg); 125 amdgpu_ring_emit_rreg(ring, reg);
127 amdgpu_ring_emit_hdp_invalidate(ring);
128 amdgpu_fence_emit(ring, &f); 126 amdgpu_fence_emit(ring, &f);
129 amdgpu_ring_commit(ring); 127 amdgpu_ring_commit(ring);
130 mutex_unlock(&adev->virt.lock_kiq); 128 mutex_unlock(&adev->virt.lock_kiq);
@@ -150,9 +148,7 @@ void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
150 148
151 mutex_lock(&adev->virt.lock_kiq); 149 mutex_lock(&adev->virt.lock_kiq);
152 amdgpu_ring_alloc(ring, 32); 150 amdgpu_ring_alloc(ring, 32);
153 amdgpu_ring_emit_hdp_flush(ring);
154 amdgpu_ring_emit_wreg(ring, reg, v); 151 amdgpu_ring_emit_wreg(ring, reg, v);
155 amdgpu_ring_emit_hdp_invalidate(ring);
156 amdgpu_fence_emit(ring, &f); 152 amdgpu_fence_emit(ring, &f);
157 amdgpu_ring_commit(ring); 153 amdgpu_ring_commit(ring);
158 mutex_unlock(&adev->virt.lock_kiq); 154 mutex_unlock(&adev->virt.lock_kiq);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 0235d7933efd..7ed5302b511a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -26,6 +26,7 @@
26 * Jerome Glisse 26 * Jerome Glisse
27 */ 27 */
28#include <linux/dma-fence-array.h> 28#include <linux/dma-fence-array.h>
29#include <linux/interval_tree_generic.h>
29#include <drm/drmP.h> 30#include <drm/drmP.h>
30#include <drm/amdgpu_drm.h> 31#include <drm/amdgpu_drm.h>
31#include "amdgpu.h" 32#include "amdgpu.h"
@@ -51,6 +52,15 @@
51 * SI supports 16. 52 * SI supports 16.
52 */ 53 */
53 54
55#define START(node) ((node)->start)
56#define LAST(node) ((node)->last)
57
58INTERVAL_TREE_DEFINE(struct amdgpu_bo_va_mapping, rb, uint64_t, __subtree_last,
59 START, LAST, static, amdgpu_vm_it)
60
61#undef START
62#undef LAST
63
54/* Local structure. Encapsulate some VM table update parameters to reduce 64/* Local structure. Encapsulate some VM table update parameters to reduce
55 * the number of function parameters 65 * the number of function parameters
56 */ 66 */
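INTERVAL_TREE_DEFINE() generates amdgpu_vm_it_insert(), amdgpu_vm_it_remove(), amdgpu_vm_it_iter_first() and amdgpu_vm_it_iter_next() specialised for amdgpu_bo_va_mapping (the suffix comes from the macro's last argument), replacing the generic interval_tree_node embedding used before. Their contract is simply "visit every mapping overlapping [saddr, eaddr]"; the kernel does this in O(log n + k) over an augmented rbtree, but a linear scan has the same semantics, as in this self-contained sketch:

	#include <stdint.h>
	#include <stdio.h>

	struct mapping { uint64_t start, last; };  /* inclusive, in GPU pages */

	static void visit_overlaps(const struct mapping *m, int n,
				   uint64_t saddr, uint64_t eaddr)
	{
		for (int i = 0; i < n; i++)
			if (m[i].start <= eaddr && m[i].last >= saddr)
				printf("overlap: [%llu, %llu]\n",
				       (unsigned long long)m[i].start,
				       (unsigned long long)m[i].last);
	}

	int main(void)
	{
		struct mapping va[] = { { 0, 15 }, { 16, 31 }, { 100, 163 } };

		visit_overlaps(va, 3, 20, 120);	/* hits the 2nd and 3rd mapping */
		return 0;
	}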
@@ -90,13 +100,14 @@ static unsigned amdgpu_vm_num_entries(struct amdgpu_device *adev,
90 if (level == 0) 100 if (level == 0)
91 /* For the root directory */ 101 /* For the root directory */
92 return adev->vm_manager.max_pfn >> 102 return adev->vm_manager.max_pfn >>
93 (amdgpu_vm_block_size * adev->vm_manager.num_level); 103 (adev->vm_manager.block_size *
104 adev->vm_manager.num_level);
94 else if (level == adev->vm_manager.num_level) 105 else if (level == adev->vm_manager.num_level)
95 /* For the page tables on the leaves */ 106 /* For the page tables on the leaves */
96 return AMDGPU_VM_PTE_COUNT; 107 return AMDGPU_VM_PTE_COUNT(adev);
97 else 108 else
98 /* Everything in between */ 109 /* Everything in between */
99 return 1 << amdgpu_vm_block_size; 110 return 1 << adev->vm_manager.block_size;
100} 111}
101 112
102/** 113/**
@@ -261,7 +272,7 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
261 unsigned level) 272 unsigned level)
262{ 273{
263 unsigned shift = (adev->vm_manager.num_level - level) * 274 unsigned shift = (adev->vm_manager.num_level - level) *
264 amdgpu_vm_block_size; 275 adev->vm_manager.block_size;
265 unsigned pt_idx, from, to; 276 unsigned pt_idx, from, to;
266 int r; 277 int r;
267 278
@@ -365,11 +376,19 @@ int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
365 return amdgpu_vm_alloc_levels(adev, vm, &vm->root, saddr, eaddr, 0); 376 return amdgpu_vm_alloc_levels(adev, vm, &vm->root, saddr, eaddr, 0);
366} 377}
367 378
368static bool amdgpu_vm_is_gpu_reset(struct amdgpu_device *adev, 379/**
369 struct amdgpu_vm_id *id) 380 * amdgpu_vm_had_gpu_reset - check if reset occurred since last use
381 *
382 * @adev: amdgpu_device pointer
383 * @id: VMID structure
384 *
385 * Check if GPU reset occurred since last use of the VMID.
386 */
387static bool amdgpu_vm_had_gpu_reset(struct amdgpu_device *adev,
388 struct amdgpu_vm_id *id)
370{ 389{
371 return id->current_gpu_reset_count != 390 return id->current_gpu_reset_count !=
372 atomic_read(&adev->gpu_reset_counter) ? true : false; 391 atomic_read(&adev->gpu_reset_counter);
373} 392}
374 393
375/** 394/**
@@ -455,7 +474,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
455 /* Check all the prerequisites to using this VMID */ 474 /* Check all the prerequisites to using this VMID */
456 if (!id) 475 if (!id)
457 continue; 476 continue;
458 if (amdgpu_vm_is_gpu_reset(adev, id)) 477 if (amdgpu_vm_had_gpu_reset(adev, id))
459 continue; 478 continue;
460 479
461 if (atomic64_read(&id->owner) != vm->client_id) 480 if (atomic64_read(&id->owner) != vm->client_id)
@@ -483,7 +502,6 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
483 if (r) 502 if (r)
484 goto error; 503 goto error;
485 504
486 id->current_gpu_reset_count = atomic_read(&adev->gpu_reset_counter);
487 list_move_tail(&id->list, &adev->vm_manager.ids_lru); 505 list_move_tail(&id->list, &adev->vm_manager.ids_lru);
488 vm->ids[ring->idx] = id; 506 vm->ids[ring->idx] = id;
489 507
@@ -504,9 +522,6 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
504 if (r) 522 if (r)
505 goto error; 523 goto error;
506 524
507 dma_fence_put(id->first);
508 id->first = dma_fence_get(fence);
509
510 dma_fence_put(id->last_flush); 525 dma_fence_put(id->last_flush);
511 id->last_flush = NULL; 526 id->last_flush = NULL;
512 527
@@ -557,8 +572,8 @@ static u64 amdgpu_vm_adjust_mc_addr(struct amdgpu_device *adev, u64 mc_addr)
557{ 572{
558 u64 addr = mc_addr; 573 u64 addr = mc_addr;
559 574
560 if (adev->mc.mc_funcs && adev->mc.mc_funcs->adjust_mc_addr) 575 if (adev->gart.gart_funcs->adjust_mc_addr)
561 addr = adev->mc.mc_funcs->adjust_mc_addr(adev, addr); 576 addr = adev->gart.gart_funcs->adjust_mc_addr(adev, addr);
562 577
563 return addr; 578 return addr;
564} 579}
@@ -583,60 +598,62 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job)
583 id->gws_size != job->gws_size || 598 id->gws_size != job->gws_size ||
584 id->oa_base != job->oa_base || 599 id->oa_base != job->oa_base ||
585 id->oa_size != job->oa_size); 600 id->oa_size != job->oa_size);
601 bool vm_flush_needed = job->vm_needs_flush ||
602 amdgpu_vm_ring_has_compute_vm_bug(ring);
603 unsigned patch_offset = 0;
586 int r; 604 int r;
587 605
588 if (job->vm_needs_flush || gds_switch_needed || 606 if (amdgpu_vm_had_gpu_reset(adev, id)) {
589 amdgpu_vm_is_gpu_reset(adev, id) || 607 gds_switch_needed = true;
590 amdgpu_vm_ring_has_compute_vm_bug(ring)) { 608 vm_flush_needed = true;
591 unsigned patch_offset = 0; 609 }
592 610
593 if (ring->funcs->init_cond_exec) 611 if (!vm_flush_needed && !gds_switch_needed)
594 patch_offset = amdgpu_ring_init_cond_exec(ring); 612 return 0;
595 613
596 if (ring->funcs->emit_pipeline_sync && 614 if (ring->funcs->init_cond_exec)
597 (job->vm_needs_flush || gds_switch_needed || 615 patch_offset = amdgpu_ring_init_cond_exec(ring);
598 amdgpu_vm_ring_has_compute_vm_bug(ring)))
599 amdgpu_ring_emit_pipeline_sync(ring);
600 616
601 if (ring->funcs->emit_vm_flush && (job->vm_needs_flush || 617 if (ring->funcs->emit_pipeline_sync)
602 amdgpu_vm_is_gpu_reset(adev, id))) { 618 amdgpu_ring_emit_pipeline_sync(ring);
603 struct dma_fence *fence;
604 u64 pd_addr = amdgpu_vm_adjust_mc_addr(adev, job->vm_pd_addr);
605 619
606 trace_amdgpu_vm_flush(pd_addr, ring->idx, job->vm_id); 620 if (ring->funcs->emit_vm_flush && vm_flush_needed) {
607 amdgpu_ring_emit_vm_flush(ring, job->vm_id, pd_addr); 621 u64 pd_addr = amdgpu_vm_adjust_mc_addr(adev, job->vm_pd_addr);
622 struct dma_fence *fence;
608 623
609 r = amdgpu_fence_emit(ring, &fence); 624 trace_amdgpu_vm_flush(pd_addr, ring->idx, job->vm_id);
610 if (r) 625 amdgpu_ring_emit_vm_flush(ring, job->vm_id, pd_addr);
611 return r;
612 626
613 mutex_lock(&adev->vm_manager.lock); 627 r = amdgpu_fence_emit(ring, &fence);
614 dma_fence_put(id->last_flush); 628 if (r)
615 id->last_flush = fence; 629 return r;
616 mutex_unlock(&adev->vm_manager.lock);
617 }
618 630
619 if (gds_switch_needed) { 631 mutex_lock(&adev->vm_manager.lock);
620 id->gds_base = job->gds_base; 632 dma_fence_put(id->last_flush);
621 id->gds_size = job->gds_size; 633 id->last_flush = fence;
622 id->gws_base = job->gws_base; 634 mutex_unlock(&adev->vm_manager.lock);
623 id->gws_size = job->gws_size; 635 }
624 id->oa_base = job->oa_base;
625 id->oa_size = job->oa_size;
626 amdgpu_ring_emit_gds_switch(ring, job->vm_id,
627 job->gds_base, job->gds_size,
628 job->gws_base, job->gws_size,
629 job->oa_base, job->oa_size);
630 }
631 636
632 if (ring->funcs->patch_cond_exec) 637 if (gds_switch_needed) {
633 amdgpu_ring_patch_cond_exec(ring, patch_offset); 638 id->gds_base = job->gds_base;
639 id->gds_size = job->gds_size;
640 id->gws_base = job->gws_base;
641 id->gws_size = job->gws_size;
642 id->oa_base = job->oa_base;
643 id->oa_size = job->oa_size;
644 amdgpu_ring_emit_gds_switch(ring, job->vm_id, job->gds_base,
645 job->gds_size, job->gws_base,
646 job->gws_size, job->oa_base,
647 job->oa_size);
648 }
634 649
635 /* the double SWITCH_BUFFER here *cannot* be skipped by COND_EXEC */ 650 if (ring->funcs->patch_cond_exec)
636 if (ring->funcs->emit_switch_buffer) { 651 amdgpu_ring_patch_cond_exec(ring, patch_offset);
637 amdgpu_ring_emit_switch_buffer(ring); 652
638 amdgpu_ring_emit_switch_buffer(ring); 653 /* the double SWITCH_BUFFER here *cannot* be skipped by COND_EXEC */
639 } 654 if (ring->funcs->emit_switch_buffer) {
655 amdgpu_ring_emit_switch_buffer(ring);
656 amdgpu_ring_emit_switch_buffer(ring);
640 } 657 }
641 return 0; 658 return 0;
642} 659}
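The rewritten amdgpu_vm_flush() computes both conditions up front, takes a single early exit, and then performs each step under its own flag instead of nesting everything inside one large block. A distilled, runnable model of the new control flow (stub booleans stand in for the ring and job state):

	#include <stdbool.h>
	#include <stdio.h>

	static int vm_flush(bool had_reset, bool needs_flush, bool gds_dirty,
			    bool has_compute_bug)
	{
		bool gds_switch_needed = gds_dirty;
		bool vm_flush_needed = needs_flush || has_compute_bug;

		if (had_reset)
			gds_switch_needed = vm_flush_needed = true;

		if (!vm_flush_needed && !gds_switch_needed)
			return 0;		/* new early exit */

		if (vm_flush_needed)
			puts("emit pipeline sync + VM flush + fence");
		if (gds_switch_needed)
			puts("emit GDS switch");
		return 0;
	}

	int main(void)
	{
		return vm_flush(false, true, false, false);
	}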
@@ -960,7 +977,7 @@ static struct amdgpu_bo *amdgpu_vm_get_pt(struct amdgpu_pte_update_params *p,
960 unsigned idx, level = p->adev->vm_manager.num_level; 977 unsigned idx, level = p->adev->vm_manager.num_level;
961 978
962 while (entry->entries) { 979 while (entry->entries) {
963 idx = addr >> (amdgpu_vm_block_size * level--); 980 idx = addr >> (p->adev->vm_manager.block_size * level--);
964 idx %= amdgpu_bo_size(entry->bo) / 8; 981 idx %= amdgpu_bo_size(entry->bo) / 8;
965 entry = &entry->entries[idx]; 982 entry = &entry->entries[idx];
966 } 983 }
@@ -987,7 +1004,8 @@ static void amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
987 uint64_t start, uint64_t end, 1004 uint64_t start, uint64_t end,
988 uint64_t dst, uint64_t flags) 1005 uint64_t dst, uint64_t flags)
989{ 1006{
990 const uint64_t mask = AMDGPU_VM_PTE_COUNT - 1; 1007 struct amdgpu_device *adev = params->adev;
1008 const uint64_t mask = AMDGPU_VM_PTE_COUNT(adev) - 1;
991 1009
992 uint64_t cur_pe_start, cur_nptes, cur_dst; 1010 uint64_t cur_pe_start, cur_nptes, cur_dst;
993 uint64_t addr; /* next GPU address to be updated */ 1011 uint64_t addr; /* next GPU address to be updated */
@@ -1011,7 +1029,7 @@ static void amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
1011 if ((addr & ~mask) == (end & ~mask)) 1029 if ((addr & ~mask) == (end & ~mask))
1012 nptes = end - addr; 1030 nptes = end - addr;
1013 else 1031 else
1014 nptes = AMDGPU_VM_PTE_COUNT - (addr & mask); 1032 nptes = AMDGPU_VM_PTE_COUNT(adev) - (addr & mask);
1015 1033
1016 cur_pe_start = amdgpu_bo_gpu_offset(pt); 1034 cur_pe_start = amdgpu_bo_gpu_offset(pt);
1017 cur_pe_start += (addr & mask) * 8; 1035 cur_pe_start += (addr & mask) * 8;
@@ -1039,7 +1057,7 @@ static void amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
1039 if ((addr & ~mask) == (end & ~mask)) 1057 if ((addr & ~mask) == (end & ~mask))
1040 nptes = end - addr; 1058 nptes = end - addr;
1041 else 1059 else
1042 nptes = AMDGPU_VM_PTE_COUNT - (addr & mask); 1060 nptes = AMDGPU_VM_PTE_COUNT(adev) - (addr & mask);
1043 1061
1044 next_pe_start = amdgpu_bo_gpu_offset(pt); 1062 next_pe_start = amdgpu_bo_gpu_offset(pt);
1045 next_pe_start += (addr & mask) * 8; 1063 next_pe_start += (addr & mask) * 8;
@@ -1186,7 +1204,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
1186 * reserve space for one command every (1 << BLOCK_SIZE) 1204 * reserve space for one command every (1 << BLOCK_SIZE)
1187 * entries or 2k dwords (whatever is smaller) 1205 * entries or 2k dwords (whatever is smaller)
1188 */ 1206 */
1189 ncmds = (nptes >> min(amdgpu_vm_block_size, 11)) + 1; 1207 ncmds = (nptes >> min(adev->vm_manager.block_size, 11u)) + 1;
1190 1208
1191 /* padding, etc. */ 1209 /* padding, etc. */
1192 ndw = 64; 1210 ndw = 64;
@@ -1301,7 +1319,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
1301 struct drm_mm_node *nodes, 1319 struct drm_mm_node *nodes,
1302 struct dma_fence **fence) 1320 struct dma_fence **fence)
1303{ 1321{
1304 uint64_t pfn, src = 0, start = mapping->it.start; 1322 uint64_t pfn, src = 0, start = mapping->start;
1305 int r; 1323 int r;
1306 1324
1307 /* normally, bo_va->flags only contains READABLE and WRITEABLE bits; go here 1325 /* normally, bo_va->flags only contains READABLE and WRITEABLE bits; go here
@@ -1353,7 +1371,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
1353 } 1371 }
1354 addr += pfn << PAGE_SHIFT; 1372 addr += pfn << PAGE_SHIFT;
1355 1373
1356 last = min((uint64_t)mapping->it.last, start + max_entries - 1); 1374 last = min((uint64_t)mapping->last, start + max_entries - 1);
1357 r = amdgpu_vm_bo_update_mapping(adev, exclusive, 1375 r = amdgpu_vm_bo_update_mapping(adev, exclusive,
1358 src, pages_addr, vm, 1376 src, pages_addr, vm,
1359 start, last, flags, addr, 1377 start, last, flags, addr,
@@ -1368,7 +1386,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
1368 } 1386 }
1369 start = last + 1; 1387 start = last + 1;
1370 1388
1371 } while (unlikely(start != mapping->it.last + 1)); 1389 } while (unlikely(start != mapping->last + 1));
1372 1390
1373 return 0; 1391 return 0;
1374} 1392}
@@ -1518,7 +1536,7 @@ static void amdgpu_vm_add_prt_cb(struct amdgpu_device *adev,
1518 if (fence) 1536 if (fence)
1519 dma_fence_wait(fence, false); 1537 dma_fence_wait(fence, false);
1520 1538
1521 amdgpu_vm_prt_put(cb->adev); 1539 amdgpu_vm_prt_put(adev);
1522 } else { 1540 } else {
1523 cb->adev = adev; 1541 cb->adev = adev;
1524 if (!fence || dma_fence_add_callback(fence, &cb->cb, 1542 if (!fence || dma_fence_add_callback(fence, &cb->cb,
@@ -1724,9 +1742,8 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
1724 uint64_t saddr, uint64_t offset, 1742 uint64_t saddr, uint64_t offset,
1725 uint64_t size, uint64_t flags) 1743 uint64_t size, uint64_t flags)
1726{ 1744{
1727 struct amdgpu_bo_va_mapping *mapping; 1745 struct amdgpu_bo_va_mapping *mapping, *tmp;
1728 struct amdgpu_vm *vm = bo_va->vm; 1746 struct amdgpu_vm *vm = bo_va->vm;
1729 struct interval_tree_node *it;
1730 uint64_t eaddr; 1747 uint64_t eaddr;
1731 1748
1732 /* validate the parameters */ 1749 /* validate the parameters */
@@ -1743,14 +1760,12 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
1743 saddr /= AMDGPU_GPU_PAGE_SIZE; 1760 saddr /= AMDGPU_GPU_PAGE_SIZE;
1744 eaddr /= AMDGPU_GPU_PAGE_SIZE; 1761 eaddr /= AMDGPU_GPU_PAGE_SIZE;
1745 1762
1746 it = interval_tree_iter_first(&vm->va, saddr, eaddr); 1763 tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
1747 if (it) { 1764 if (tmp) {
1748 struct amdgpu_bo_va_mapping *tmp;
1749 tmp = container_of(it, struct amdgpu_bo_va_mapping, it);
1750 /* bo and tmp overlap, invalid addr */ 1765 /* bo and tmp overlap, invalid addr */
1751 dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with " 1766 dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
1752 "0x%010lx-0x%010lx\n", bo_va->bo, saddr, eaddr, 1767 "0x%010Lx-0x%010Lx\n", bo_va->bo, saddr, eaddr,
1753 tmp->it.start, tmp->it.last + 1); 1768 tmp->start, tmp->last + 1);
1754 return -EINVAL; 1769 return -EINVAL;
1755 } 1770 }
1756 1771
@@ -1759,13 +1774,13 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
1759 return -ENOMEM; 1774 return -ENOMEM;
1760 1775
1761 INIT_LIST_HEAD(&mapping->list); 1776 INIT_LIST_HEAD(&mapping->list);
1762 mapping->it.start = saddr; 1777 mapping->start = saddr;
1763 mapping->it.last = eaddr; 1778 mapping->last = eaddr;
1764 mapping->offset = offset; 1779 mapping->offset = offset;
1765 mapping->flags = flags; 1780 mapping->flags = flags;
1766 1781
1767 list_add(&mapping->list, &bo_va->invalids); 1782 list_add(&mapping->list, &bo_va->invalids);
1768 interval_tree_insert(&mapping->it, &vm->va); 1783 amdgpu_vm_it_insert(mapping, &vm->va);
1769 1784
1770 if (flags & AMDGPU_PTE_PRT) 1785 if (flags & AMDGPU_PTE_PRT)
1771 amdgpu_vm_prt_get(adev); 1786 amdgpu_vm_prt_get(adev);
@@ -1823,13 +1838,13 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
1823 saddr /= AMDGPU_GPU_PAGE_SIZE; 1838 saddr /= AMDGPU_GPU_PAGE_SIZE;
1824 eaddr /= AMDGPU_GPU_PAGE_SIZE; 1839 eaddr /= AMDGPU_GPU_PAGE_SIZE;
1825 1840
1826 mapping->it.start = saddr; 1841 mapping->start = saddr;
1827 mapping->it.last = eaddr; 1842 mapping->last = eaddr;
1828 mapping->offset = offset; 1843 mapping->offset = offset;
1829 mapping->flags = flags; 1844 mapping->flags = flags;
1830 1845
1831 list_add(&mapping->list, &bo_va->invalids); 1846 list_add(&mapping->list, &bo_va->invalids);
1832 interval_tree_insert(&mapping->it, &vm->va); 1847 amdgpu_vm_it_insert(mapping, &vm->va);
1833 1848
1834 if (flags & AMDGPU_PTE_PRT) 1849 if (flags & AMDGPU_PTE_PRT)
1835 amdgpu_vm_prt_get(adev); 1850 amdgpu_vm_prt_get(adev);
@@ -1860,7 +1875,7 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
1860 saddr /= AMDGPU_GPU_PAGE_SIZE; 1875 saddr /= AMDGPU_GPU_PAGE_SIZE;
1861 1876
1862 list_for_each_entry(mapping, &bo_va->valids, list) { 1877 list_for_each_entry(mapping, &bo_va->valids, list) {
1863 if (mapping->it.start == saddr) 1878 if (mapping->start == saddr)
1864 break; 1879 break;
1865 } 1880 }
1866 1881
@@ -1868,7 +1883,7 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
1868 valid = false; 1883 valid = false;
1869 1884
1870 list_for_each_entry(mapping, &bo_va->invalids, list) { 1885 list_for_each_entry(mapping, &bo_va->invalids, list) {
1871 if (mapping->it.start == saddr) 1886 if (mapping->start == saddr)
1872 break; 1887 break;
1873 } 1888 }
1874 1889
@@ -1877,7 +1892,7 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
1877 } 1892 }
1878 1893
1879 list_del(&mapping->list); 1894 list_del(&mapping->list);
1880 interval_tree_remove(&mapping->it, &vm->va); 1895 amdgpu_vm_it_remove(mapping, &vm->va);
1881 trace_amdgpu_vm_bo_unmap(bo_va, mapping); 1896 trace_amdgpu_vm_bo_unmap(bo_va, mapping);
1882 1897
1883 if (valid) 1898 if (valid)
@@ -1905,7 +1920,6 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
1905 uint64_t saddr, uint64_t size) 1920 uint64_t saddr, uint64_t size)
1906{ 1921{
1907 struct amdgpu_bo_va_mapping *before, *after, *tmp, *next; 1922 struct amdgpu_bo_va_mapping *before, *after, *tmp, *next;
1908 struct interval_tree_node *it;
1909 LIST_HEAD(removed); 1923 LIST_HEAD(removed);
1910 uint64_t eaddr; 1924 uint64_t eaddr;
1911 1925
@@ -1927,43 +1941,42 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
1927 INIT_LIST_HEAD(&after->list); 1941 INIT_LIST_HEAD(&after->list);
1928 1942
1929 /* Now gather all removed mappings */ 1943 /* Now gather all removed mappings */
1930 it = interval_tree_iter_first(&vm->va, saddr, eaddr); 1944 tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
1931 while (it) { 1945 while (tmp) {
1932 tmp = container_of(it, struct amdgpu_bo_va_mapping, it);
1933 it = interval_tree_iter_next(it, saddr, eaddr);
1934
1935 /* Remember mapping split at the start */ 1946 /* Remember mapping split at the start */
1936 if (tmp->it.start < saddr) { 1947 if (tmp->start < saddr) {
1937 before->it.start = tmp->it.start; 1948 before->start = tmp->start;
1938 before->it.last = saddr - 1; 1949 before->last = saddr - 1;
1939 before->offset = tmp->offset; 1950 before->offset = tmp->offset;
1940 before->flags = tmp->flags; 1951 before->flags = tmp->flags;
1941 list_add(&before->list, &tmp->list); 1952 list_add(&before->list, &tmp->list);
1942 } 1953 }
1943 1954
1944 /* Remember mapping split at the end */ 1955 /* Remember mapping split at the end */
1945 if (tmp->it.last > eaddr) { 1956 if (tmp->last > eaddr) {
1946 after->it.start = eaddr + 1; 1957 after->start = eaddr + 1;
1947 after->it.last = tmp->it.last; 1958 after->last = tmp->last;
1948 after->offset = tmp->offset; 1959 after->offset = tmp->offset;
1949 after->offset += after->it.start - tmp->it.start; 1960 after->offset += after->start - tmp->start;
1950 after->flags = tmp->flags; 1961 after->flags = tmp->flags;
1951 list_add(&after->list, &tmp->list); 1962 list_add(&after->list, &tmp->list);
1952 } 1963 }
1953 1964
1954 list_del(&tmp->list); 1965 list_del(&tmp->list);
1955 list_add(&tmp->list, &removed); 1966 list_add(&tmp->list, &removed);
1967
1968 tmp = amdgpu_vm_it_iter_next(tmp, saddr, eaddr);
1956 } 1969 }
1957 1970
1958 /* And free them up */ 1971 /* And free them up */
1959 list_for_each_entry_safe(tmp, next, &removed, list) { 1972 list_for_each_entry_safe(tmp, next, &removed, list) {
1960 interval_tree_remove(&tmp->it, &vm->va); 1973 amdgpu_vm_it_remove(tmp, &vm->va);
1961 list_del(&tmp->list); 1974 list_del(&tmp->list);
1962 1975
1963 if (tmp->it.start < saddr) 1976 if (tmp->start < saddr)
1964 tmp->it.start = saddr; 1977 tmp->start = saddr;
1965 if (tmp->it.last > eaddr) 1978 if (tmp->last > eaddr)
1966 tmp->it.last = eaddr; 1979 tmp->last = eaddr;
1967 1980
1968 list_add(&tmp->list, &vm->freed); 1981 list_add(&tmp->list, &vm->freed);
1969 trace_amdgpu_vm_bo_unmap(NULL, tmp); 1982 trace_amdgpu_vm_bo_unmap(NULL, tmp);
@@ -1971,7 +1984,7 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
1971 1984
1972 /* Insert partial mapping before the range */ 1985 /* Insert partial mapping before the range */
1973 if (!list_empty(&before->list)) { 1986 if (!list_empty(&before->list)) {
1974 interval_tree_insert(&before->it, &vm->va); 1987 amdgpu_vm_it_insert(before, &vm->va);
1975 if (before->flags & AMDGPU_PTE_PRT) 1988 if (before->flags & AMDGPU_PTE_PRT)
1976 amdgpu_vm_prt_get(adev); 1989 amdgpu_vm_prt_get(adev);
1977 } else { 1990 } else {
@@ -1980,7 +1993,7 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
1980 1993
1981 /* Insert partial mapping after the range */ 1994 /* Insert partial mapping after the range */
1982 if (!list_empty(&after->list)) { 1995 if (!list_empty(&after->list)) {
1983 interval_tree_insert(&after->it, &vm->va); 1996 amdgpu_vm_it_insert(after, &vm->va);
1984 if (after->flags & AMDGPU_PTE_PRT) 1997 if (after->flags & AMDGPU_PTE_PRT)
1985 amdgpu_vm_prt_get(adev); 1998 amdgpu_vm_prt_get(adev);
1986 } else { 1999 } else {
@@ -2014,13 +2027,13 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
2014 2027
2015 list_for_each_entry_safe(mapping, next, &bo_va->valids, list) { 2028 list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
2016 list_del(&mapping->list); 2029 list_del(&mapping->list);
2017 interval_tree_remove(&mapping->it, &vm->va); 2030 amdgpu_vm_it_remove(mapping, &vm->va);
2018 trace_amdgpu_vm_bo_unmap(bo_va, mapping); 2031 trace_amdgpu_vm_bo_unmap(bo_va, mapping);
2019 list_add(&mapping->list, &vm->freed); 2032 list_add(&mapping->list, &vm->freed);
2020 } 2033 }
2021 list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) { 2034 list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
2022 list_del(&mapping->list); 2035 list_del(&mapping->list);
2023 interval_tree_remove(&mapping->it, &vm->va); 2036 amdgpu_vm_it_remove(mapping, &vm->va);
2024 amdgpu_vm_free_mapping(adev, vm, mapping, 2037 amdgpu_vm_free_mapping(adev, vm, mapping,
2025 bo_va->last_pt_update); 2038 bo_va->last_pt_update);
2026 } 2039 }
@@ -2051,6 +2064,44 @@ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
2051 } 2064 }
2052} 2065}
2053 2066
2067static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size)
2068{
2069 /* Total bits covered by PD + PTs */
2070 unsigned bits = ilog2(vm_size) + 18;
2071
2072 /* Make sure the PD is 4K in size up to an 8GB address space.
2073 * Above that, split the bits equally between PD and PTs. */
2074 if (vm_size <= 8)
2075 return (bits - 9);
2076 else
2077 return ((bits + 3) / 2);
2078}
2079
2080/**
2081 * amdgpu_vm_adjust_size - adjust vm size and block size
2082 *
2083 * @adev: amdgpu_device pointer
2084 * @vm_size: the default vm size if it's set auto
2085 */
2086void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint64_t vm_size)
2087{
2088 /* adjust vm size firstly */
2089 if (amdgpu_vm_size == -1)
2090 adev->vm_manager.vm_size = vm_size;
2091 else
2092 adev->vm_manager.vm_size = amdgpu_vm_size;
2093
2094 /* block size depends on vm size */
2095 if (amdgpu_vm_block_size == -1)
2096 adev->vm_manager.block_size =
2097 amdgpu_vm_get_block_size(adev->vm_manager.vm_size);
2098 else
2099 adev->vm_manager.block_size = amdgpu_vm_block_size;
2100
2101 DRM_INFO("vm size is %llu GB, block size is %u-bit\n",
2102 adev->vm_manager.vm_size, adev->vm_manager.block_size);
2103}
2104
2054/** 2105/**
2055 * amdgpu_vm_init - initialize a vm instance 2106 * amdgpu_vm_init - initialize a vm instance
2056 * 2107 *
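The arithmetic in the two new helpers above: with 4K GPU pages, one GB of address space needs 2^18 page-table entries, which is where the +18 comes from. Up to 8 GB the page directory is kept at 4K (512 entries, i.e. 9 bits), and beyond that the bits are split roughly evenly between directory and tables. A userspace model reproducing the calculation:

	#include <stdint.h>
	#include <stdio.h>

	static unsigned ilog2_u64(uint64_t v)
	{
		unsigned r = 0;

		while (v >>= 1)
			r++;
		return r;
	}

	/* model of amdgpu_vm_get_block_size(); vm_size is in GB */
	static uint32_t vm_block_size(uint64_t vm_size_gb)
	{
		unsigned bits = ilog2_u64(vm_size_gb) + 18;

		/* <= 8 GB: keep the PD at 4K (512 entries == 9 bits) */
		if (vm_size_gb <= 8)
			return bits - 9;
		return (bits + 3) / 2;	/* otherwise split PD/PT evenly */
	}

	int main(void)
	{
		printf("8 GB  -> %u-bit block\n", vm_block_size(8));  /* 21-9 = 12 */
		printf("64 GB -> %u-bit block\n", vm_block_size(64)); /* (24+3)/2 = 13 */
		return 0;
	}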
@@ -2062,7 +2113,7 @@ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
2062int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm) 2113int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
2063{ 2114{
2064 const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE, 2115 const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE,
2065 AMDGPU_VM_PTE_COUNT * 8); 2116 AMDGPU_VM_PTE_COUNT(adev) * 8);
2066 unsigned ring_instance; 2117 unsigned ring_instance;
2067 struct amdgpu_ring *ring; 2118 struct amdgpu_ring *ring;
2068 struct amd_sched_rq *rq; 2119 struct amd_sched_rq *rq;
@@ -2162,9 +2213,9 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
2162 if (!RB_EMPTY_ROOT(&vm->va)) { 2213 if (!RB_EMPTY_ROOT(&vm->va)) {
2163 dev_err(adev->dev, "still active bo inside vm\n"); 2214 dev_err(adev->dev, "still active bo inside vm\n");
2164 } 2215 }
2165 rbtree_postorder_for_each_entry_safe(mapping, tmp, &vm->va, it.rb) { 2216 rbtree_postorder_for_each_entry_safe(mapping, tmp, &vm->va, rb) {
2166 list_del(&mapping->list); 2217 list_del(&mapping->list);
2167 interval_tree_remove(&mapping->it, &vm->va); 2218 amdgpu_vm_it_remove(mapping, &vm->va);
2168 kfree(mapping); 2219 kfree(mapping);
2169 } 2220 }
2170 list_for_each_entry_safe(mapping, tmp, &vm->freed, list) { 2221 list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
@@ -2227,7 +2278,6 @@ void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
2227 for (i = 0; i < AMDGPU_NUM_VM; ++i) { 2278 for (i = 0; i < AMDGPU_NUM_VM; ++i) {
2228 struct amdgpu_vm_id *id = &adev->vm_manager.ids[i]; 2279 struct amdgpu_vm_id *id = &adev->vm_manager.ids[i];
2229 2280
2230 dma_fence_put(adev->vm_manager.ids[i].first);
2231 amdgpu_sync_free(&adev->vm_manager.ids[i].active); 2281 amdgpu_sync_free(&adev->vm_manager.ids[i].active);
2232 dma_fence_put(id->flushed_updates); 2282 dma_fence_put(id->flushed_updates);
2233 dma_fence_put(id->last_flush); 2283 dma_fence_put(id->last_flush);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index fbe17bf73a00..d9e57290dc71 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -45,7 +45,7 @@ struct amdgpu_bo_list_entry;
45#define AMDGPU_VM_MAX_UPDATE_SIZE 0x3FFFF 45#define AMDGPU_VM_MAX_UPDATE_SIZE 0x3FFFF
46 46
47/* number of entries in page table */ 47/* number of entries in page table */
48#define AMDGPU_VM_PTE_COUNT (1 << amdgpu_vm_block_size) 48#define AMDGPU_VM_PTE_COUNT(adev) (1 << (adev)->vm_manager.block_size)
49 49
50/* PTBs (Page Table Blocks) need to be aligned to 32K */ 50/* PTBs (Page Table Blocks) need to be aligned to 32K */
51#define AMDGPU_VM_PTB_ALIGN_SIZE 32768 51#define AMDGPU_VM_PTB_ALIGN_SIZE 32768
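Since the block size is now per-device state, AMDGPU_VM_PTE_COUNT(adev) evaluates at run time; the count stays a power of two, so the usual mask tricks in amdgpu_vm_update_ptes() still work. A worked example with an assumed 12-bit block size:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t block_size = 12;			/* runtime, per device */
		uint64_t pte_count = 1ull << block_size;	/* 4096 entries / PT */
		uint64_t mask = pte_count - 1;

		/* from amdgpu_vm_update_ptes(): PTEs left in the current table */
		uint64_t addr = 0x12345;
		uint64_t nptes = pte_count - (addr & mask);

		printf("addr 0x%llx: %llu PTEs until the next table boundary\n",
		       (unsigned long long)addr, (unsigned long long)nptes);
		return 0;
	}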
@@ -76,6 +76,14 @@ struct amdgpu_bo_list_entry;
76#define AMDGPU_VM_FAULT_STOP_FIRST 1 76#define AMDGPU_VM_FAULT_STOP_FIRST 1
77#define AMDGPU_VM_FAULT_STOP_ALWAYS 2 77#define AMDGPU_VM_FAULT_STOP_ALWAYS 2
78 78
79/* max number of VMHUB */
80#define AMDGPU_MAX_VMHUBS 2
81#define AMDGPU_GFXHUB 0
82#define AMDGPU_MMHUB 1
83
84/* hardcode that limit for now */
85#define AMDGPU_VA_RESERVED_SIZE (8 << 20)
86
79struct amdgpu_vm_pt { 87struct amdgpu_vm_pt {
80 struct amdgpu_bo *bo; 88 struct amdgpu_bo *bo;
81 uint64_t addr; 89 uint64_t addr;
@@ -123,7 +131,6 @@ struct amdgpu_vm {
123 131
124struct amdgpu_vm_id { 132struct amdgpu_vm_id {
125 struct list_head list; 133 struct list_head list;
126 struct dma_fence *first;
127 struct amdgpu_sync active; 134 struct amdgpu_sync active;
128 struct dma_fence *last_flush; 135 struct dma_fence *last_flush;
129 atomic64_t owner; 136 atomic64_t owner;
@@ -155,6 +162,8 @@ struct amdgpu_vm_manager {
155 162
156 uint64_t max_pfn; 163 uint64_t max_pfn;
157 uint32_t num_level; 164 uint32_t num_level;
165 uint64_t vm_size;
166 uint32_t block_size;
158 /* vram base address for page table entry */ 167 /* vram base address for page table entry */
159 u64 vram_base_offset; 168 u64 vram_base_offset;
160 /* is vm enabled? */ 169 /* is vm enabled? */
@@ -225,5 +234,6 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
225 uint64_t saddr, uint64_t size); 234 uint64_t saddr, uint64_t size);
226void amdgpu_vm_bo_rmv(struct amdgpu_device *adev, 235void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
227 struct amdgpu_bo_va *bo_va); 236 struct amdgpu_bo_va *bo_va);
237void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint64_t vm_size);
228 238
229#endif 239#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index 9e577e3d3147..a4831fe0223b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -93,7 +93,6 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
93 const struct ttm_place *place, 93 const struct ttm_place *place,
94 struct ttm_mem_reg *mem) 94 struct ttm_mem_reg *mem)
95{ 95{
96 struct amdgpu_bo *bo = container_of(tbo, struct amdgpu_bo, tbo);
97 struct amdgpu_vram_mgr *mgr = man->priv; 96 struct amdgpu_vram_mgr *mgr = man->priv;
98 struct drm_mm *mm = &mgr->mm; 97 struct drm_mm *mm = &mgr->mm;
99 struct drm_mm_node *nodes; 98 struct drm_mm_node *nodes;
@@ -106,8 +105,8 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
106 if (!lpfn) 105 if (!lpfn)
107 lpfn = man->size; 106 lpfn = man->size;
108 107
109 if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS || 108 if (place->flags & TTM_PL_FLAG_CONTIGUOUS ||
110 place->lpfn || amdgpu_vram_page_split == -1) { 109 amdgpu_vram_page_split == -1) {
111 pages_per_node = ~0ul; 110 pages_per_node = ~0ul;
112 num_nodes = 1; 111 num_nodes = 1;
113 } else { 112 } else {
@@ -124,12 +123,14 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
124 if (place->flags & TTM_PL_FLAG_TOPDOWN) 123 if (place->flags & TTM_PL_FLAG_TOPDOWN)
125 mode = DRM_MM_INSERT_HIGH; 124 mode = DRM_MM_INSERT_HIGH;
126 125
126 mem->start = 0;
127 pages_left = mem->num_pages; 127 pages_left = mem->num_pages;
128 128
129 spin_lock(&mgr->lock); 129 spin_lock(&mgr->lock);
130 for (i = 0; i < num_nodes; ++i) { 130 for (i = 0; i < num_nodes; ++i) {
131 unsigned long pages = min(pages_left, pages_per_node); 131 unsigned long pages = min(pages_left, pages_per_node);
132 uint32_t alignment = mem->page_alignment; 132 uint32_t alignment = mem->page_alignment;
133 unsigned long start;
133 134
134 if (pages == pages_per_node) 135 if (pages == pages_per_node)
135 alignment = pages_per_node; 136 alignment = pages_per_node;
@@ -141,11 +142,19 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
141 if (unlikely(r)) 142 if (unlikely(r))
142 goto error; 143 goto error;
143 144
145 /* Calculate a virtual BO start address to easily check if
146 * everything is CPU accessible.
147 */
148 start = nodes[i].start + nodes[i].size;
149 if (start > mem->num_pages)
150 start -= mem->num_pages;
151 else
152 start = 0;
153 mem->start = max(mem->start, start);
144 pages_left -= pages; 154 pages_left -= pages;
145 } 155 }
146 spin_unlock(&mgr->lock); 156 spin_unlock(&mgr->lock);
147 157
148 mem->start = num_nodes == 1 ? nodes[0].start : AMDGPU_BO_INVALID_OFFSET;
149 mem->mm_node = nodes; 158 mem->mm_node = nodes;
150 159
151 return 0; 160 return 0;
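Instead of marking split allocations with AMDGPU_BO_INVALID_OFFSET, the manager now fakes a start address chosen so that mem->start + num_pages reaches the end of the highest node; the single visibility check in amdgpu_ttm_io_mem_reserve() then covers every node at once. A model of that computation, with illustrative node positions:

	#include <stdint.h>
	#include <stdio.h>

	struct node { uint64_t start, size; };	/* pages */

	static uint64_t virtual_start(const struct node *n, int cnt,
				      uint64_t num_pages)
	{
		uint64_t mem_start = 0;

		for (int i = 0; i < cnt; i++) {
			uint64_t end = n[i].start + n[i].size;
			uint64_t s = end > num_pages ? end - num_pages : 0;

			if (s > mem_start)
				mem_start = s;
		}
		return mem_start;
	}

	int main(void)
	{
		struct node nodes[] = { { 1000, 64 }, { 5000, 64 } };
		uint64_t start = virtual_start(nodes, 2, 128);

		/* start(4936) + num_pages(128) == 5064, the highest node end */
		printf("virtual start = %llu\n", (unsigned long long)start);
		return 0;
	}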
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index f525ae4e0576..ba98d35340a3 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -1090,23 +1090,10 @@ static u32 dce_v10_0_latency_watermark(struct dce10_wm_params *wm)
1090 a.full = dfixed_const(available_bandwidth); 1090 a.full = dfixed_const(available_bandwidth);
1091 b.full = dfixed_const(wm->num_heads); 1091 b.full = dfixed_const(wm->num_heads);
1092 a.full = dfixed_div(a, b); 1092 a.full = dfixed_div(a, b);
1093 tmp = div_u64((u64) dmif_size * (u64) wm->disp_clk, mc_latency + 512);
1094 tmp = min(dfixed_trunc(a), tmp);
1093 1095
1094 b.full = dfixed_const(mc_latency + 512); 1096 lb_fill_bw = min(tmp, wm->disp_clk * wm->bytes_per_pixel / 1000);
1095 c.full = dfixed_const(wm->disp_clk);
1096 b.full = dfixed_div(b, c);
1097
1098 c.full = dfixed_const(dmif_size);
1099 b.full = dfixed_div(c, b);
1100
1101 tmp = min(dfixed_trunc(a), dfixed_trunc(b));
1102
1103 b.full = dfixed_const(1000);
1104 c.full = dfixed_const(wm->disp_clk);
1105 b.full = dfixed_div(c, b);
1106 c.full = dfixed_const(wm->bytes_per_pixel);
1107 b.full = dfixed_mul(b, c);
1108
1109 lb_fill_bw = min(tmp, dfixed_trunc(b));
1110 1097
1111 a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel); 1098 a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
1112 b.full = dfixed_const(1000); 1099 b.full = dfixed_const(1000);
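The deleted fixed20_12 chain computed dmif_size / ((mc_latency + 512) / disp_clk) and disp_clk * bytes_per_pixel / 1000 through several intermediate divisions; both collapse into plain integer expressions, with div_u64 guarding the 64-bit intermediate in the kernel. A sketch of the collapsed math, with made-up magnitudes:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* illustrative values, not real hardware parameters */
		uint64_t dmif_size = 12288;	/* bytes */
		uint64_t disp_clk = 600000;	/* kHz */
		uint64_t mc_latency = 3000;
		uint64_t bytes_per_pixel = 4;
		uint64_t bw_per_head = 1000000;	/* available_bandwidth / heads */

		/* three fixed-point divisions become one 64-bit division */
		uint64_t tmp = dmif_size * disp_clk / (mc_latency + 512);
		if (bw_per_head < tmp)
			tmp = bw_per_head;

		uint64_t fill = disp_clk * bytes_per_pixel / 1000;
		uint64_t lb_fill_bw = tmp < fill ? tmp : fill;

		printf("lb_fill_bw = %llu\n", (unsigned long long)lb_fill_bw);
		return 0;
	}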
@@ -1214,14 +1201,14 @@ static void dce_v10_0_program_watermarks(struct amdgpu_device *adev,
1214{ 1201{
1215 struct drm_display_mode *mode = &amdgpu_crtc->base.mode; 1202 struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
1216 struct dce10_wm_params wm_low, wm_high; 1203 struct dce10_wm_params wm_low, wm_high;
1217 u32 pixel_period; 1204 u32 active_time;
1218 u32 line_time = 0; 1205 u32 line_time = 0;
1219 u32 latency_watermark_a = 0, latency_watermark_b = 0; 1206 u32 latency_watermark_a = 0, latency_watermark_b = 0;
1220 u32 tmp, wm_mask, lb_vblank_lead_lines = 0; 1207 u32 tmp, wm_mask, lb_vblank_lead_lines = 0;
1221 1208
1222 if (amdgpu_crtc->base.enabled && num_heads && mode) { 1209 if (amdgpu_crtc->base.enabled && num_heads && mode) {
1223 pixel_period = 1000000 / (u32)mode->clock; 1210 active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
1224 line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535); 1211 line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
1225 1212
1226 /* watermark for high clocks */ 1213 /* watermark for high clocks */
1227 if (adev->pm.dpm_enabled) { 1214 if (adev->pm.dpm_enabled) {
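The pixel_period-to-active_time rewrite fixes a precision problem: computing the per-pixel period first (1000000 / clock, truncated to whole nanoseconds) throws away the fractional part, and that error is then multiplied by the line width. Multiplying first and dividing once keeps the precision, which is clearly visible at common pixel clocks:

	#include <stdio.h>

	int main(void)
	{
		unsigned long clock = 148500;	/* kHz, 1080p@60 pixel clock */
		unsigned long hdisplay = 1920, htotal = 2200;

		/* old: period truncated first, error scaled by line width */
		unsigned long pixel_period = 1000000 / clock;	   /* 6, really ~6.73 */
		unsigned long old_active = hdisplay * pixel_period;	/* 11520 ns */

		/* new: multiply first, divide once */
		unsigned long active_time = 1000000UL * hdisplay / clock; /* 12929 ns */
		unsigned long line_time = 1000000UL * htotal / clock;	  /* 14814 ns */

		printf("old %lu ns, new %lu ns (line time %lu ns)\n",
		       old_active, active_time, line_time);
		return 0;
	}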
@@ -1236,7 +1223,7 @@ static void dce_v10_0_program_watermarks(struct amdgpu_device *adev,
1236 1223
1237 wm_high.disp_clk = mode->clock; 1224 wm_high.disp_clk = mode->clock;
1238 wm_high.src_width = mode->crtc_hdisplay; 1225 wm_high.src_width = mode->crtc_hdisplay;
1239 wm_high.active_time = mode->crtc_hdisplay * pixel_period; 1226 wm_high.active_time = active_time;
1240 wm_high.blank_time = line_time - wm_high.active_time; 1227 wm_high.blank_time = line_time - wm_high.active_time;
1241 wm_high.interlaced = false; 1228 wm_high.interlaced = false;
1242 if (mode->flags & DRM_MODE_FLAG_INTERLACE) 1229 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
@@ -1275,7 +1262,7 @@ static void dce_v10_0_program_watermarks(struct amdgpu_device *adev,
1275 1262
1276 wm_low.disp_clk = mode->clock; 1263 wm_low.disp_clk = mode->clock;
1277 wm_low.src_width = mode->crtc_hdisplay; 1264 wm_low.src_width = mode->crtc_hdisplay;
1278 wm_low.active_time = mode->crtc_hdisplay * pixel_period; 1265 wm_low.active_time = active_time;
1279 wm_low.blank_time = line_time - wm_low.active_time; 1266 wm_low.blank_time = line_time - wm_low.active_time;
1280 wm_low.interlaced = false; 1267 wm_low.interlaced = false;
1281 if (mode->flags & DRM_MODE_FLAG_INTERLACE) 1268 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
@@ -2631,7 +2618,8 @@ static void dce_v10_0_cursor_reset(struct drm_crtc *crtc)
2631} 2618}
2632 2619
2633static int dce_v10_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, 2620static int dce_v10_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
2634 u16 *blue, uint32_t size) 2621 u16 *blue, uint32_t size,
2622 struct drm_modeset_acquire_ctx *ctx)
2635{ 2623{
2636 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2624 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2637 int i; 2625 int i;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index 3eac27f24d94..e59bc42df18c 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -1059,23 +1059,10 @@ static u32 dce_v11_0_latency_watermark(struct dce10_wm_params *wm)
1059 a.full = dfixed_const(available_bandwidth); 1059 a.full = dfixed_const(available_bandwidth);
1060 b.full = dfixed_const(wm->num_heads); 1060 b.full = dfixed_const(wm->num_heads);
1061 a.full = dfixed_div(a, b); 1061 a.full = dfixed_div(a, b);
1062 tmp = div_u64((u64) dmif_size * (u64) wm->disp_clk, mc_latency + 512);
1063 tmp = min(dfixed_trunc(a), tmp);
1062 1064
1063 b.full = dfixed_const(mc_latency + 512); 1065 lb_fill_bw = min(tmp, wm->disp_clk * wm->bytes_per_pixel / 1000);
1064 c.full = dfixed_const(wm->disp_clk);
1065 b.full = dfixed_div(b, c);
1066
1067 c.full = dfixed_const(dmif_size);
1068 b.full = dfixed_div(c, b);
1069
1070 tmp = min(dfixed_trunc(a), dfixed_trunc(b));
1071
1072 b.full = dfixed_const(1000);
1073 c.full = dfixed_const(wm->disp_clk);
1074 b.full = dfixed_div(c, b);
1075 c.full = dfixed_const(wm->bytes_per_pixel);
1076 b.full = dfixed_mul(b, c);
1077
1078 lb_fill_bw = min(tmp, dfixed_trunc(b));
1079 1066
1080 a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel); 1067 a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
1081 b.full = dfixed_const(1000); 1068 b.full = dfixed_const(1000);
@@ -1183,14 +1170,14 @@ static void dce_v11_0_program_watermarks(struct amdgpu_device *adev,
1183{ 1170{
1184 struct drm_display_mode *mode = &amdgpu_crtc->base.mode; 1171 struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
1185 struct dce10_wm_params wm_low, wm_high; 1172 struct dce10_wm_params wm_low, wm_high;
1186 u32 pixel_period; 1173 u32 active_time;
1187 u32 line_time = 0; 1174 u32 line_time = 0;
1188 u32 latency_watermark_a = 0, latency_watermark_b = 0; 1175 u32 latency_watermark_a = 0, latency_watermark_b = 0;
1189 u32 tmp, wm_mask, lb_vblank_lead_lines = 0; 1176 u32 tmp, wm_mask, lb_vblank_lead_lines = 0;
1190 1177
1191 if (amdgpu_crtc->base.enabled && num_heads && mode) { 1178 if (amdgpu_crtc->base.enabled && num_heads && mode) {
1192 pixel_period = 1000000 / (u32)mode->clock; 1179 active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
1193 line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535); 1180 line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
1194 1181
1195 /* watermark for high clocks */ 1182 /* watermark for high clocks */
1196 if (adev->pm.dpm_enabled) { 1183 if (adev->pm.dpm_enabled) {
@@ -1205,7 +1192,7 @@ static void dce_v11_0_program_watermarks(struct amdgpu_device *adev,
1205 1192
1206 wm_high.disp_clk = mode->clock; 1193 wm_high.disp_clk = mode->clock;
1207 wm_high.src_width = mode->crtc_hdisplay; 1194 wm_high.src_width = mode->crtc_hdisplay;
1208 wm_high.active_time = mode->crtc_hdisplay * pixel_period; 1195 wm_high.active_time = active_time;
1209 wm_high.blank_time = line_time - wm_high.active_time; 1196 wm_high.blank_time = line_time - wm_high.active_time;
1210 wm_high.interlaced = false; 1197 wm_high.interlaced = false;
1211 if (mode->flags & DRM_MODE_FLAG_INTERLACE) 1198 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
@@ -1244,7 +1231,7 @@ static void dce_v11_0_program_watermarks(struct amdgpu_device *adev,
1244 1231
1245 wm_low.disp_clk = mode->clock; 1232 wm_low.disp_clk = mode->clock;
1246 wm_low.src_width = mode->crtc_hdisplay; 1233 wm_low.src_width = mode->crtc_hdisplay;
1247 wm_low.active_time = mode->crtc_hdisplay * pixel_period; 1234 wm_low.active_time = active_time;
1248 wm_low.blank_time = line_time - wm_low.active_time; 1235 wm_low.blank_time = line_time - wm_low.active_time;
1249 wm_low.interlaced = false; 1236 wm_low.interlaced = false;
1250 if (mode->flags & DRM_MODE_FLAG_INTERLACE) 1237 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
@@ -2651,7 +2638,8 @@ static void dce_v11_0_cursor_reset(struct drm_crtc *crtc)
2651} 2638}
2652 2639
2653static int dce_v11_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, 2640static int dce_v11_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
2654 u16 *blue, uint32_t size) 2641 u16 *blue, uint32_t size,
2642 struct drm_modeset_acquire_ctx *ctx)
2655{ 2643{
2656 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2644 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2657 int i; 2645 int i;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index 838cf1a778f2..307269bda4fa 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -861,23 +861,10 @@ static u32 dce_v6_0_latency_watermark(struct dce6_wm_params *wm)
861 a.full = dfixed_const(available_bandwidth); 861 a.full = dfixed_const(available_bandwidth);
862 b.full = dfixed_const(wm->num_heads); 862 b.full = dfixed_const(wm->num_heads);
863 a.full = dfixed_div(a, b); 863 a.full = dfixed_div(a, b);
864 tmp = div_u64((u64) dmif_size * (u64) wm->disp_clk, mc_latency + 512);
865 tmp = min(dfixed_trunc(a), tmp);
864 866
865 b.full = dfixed_const(mc_latency + 512); 867 lb_fill_bw = min(tmp, wm->disp_clk * wm->bytes_per_pixel / 1000);
866 c.full = dfixed_const(wm->disp_clk);
867 b.full = dfixed_div(b, c);
868
869 c.full = dfixed_const(dmif_size);
870 b.full = dfixed_div(c, b);
871
872 tmp = min(dfixed_trunc(a), dfixed_trunc(b));
873
874 b.full = dfixed_const(1000);
875 c.full = dfixed_const(wm->disp_clk);
876 b.full = dfixed_div(c, b);
877 c.full = dfixed_const(wm->bytes_per_pixel);
878 b.full = dfixed_mul(b, c);
879
880 lb_fill_bw = min(tmp, dfixed_trunc(b));
881 868
882 a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel); 869 a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
883 b.full = dfixed_const(1000); 870 b.full = dfixed_const(1000);
@@ -986,7 +973,7 @@ static void dce_v6_0_program_watermarks(struct amdgpu_device *adev,
986 struct drm_display_mode *mode = &amdgpu_crtc->base.mode; 973 struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
987 struct dce6_wm_params wm_low, wm_high; 974 struct dce6_wm_params wm_low, wm_high;
988 u32 dram_channels; 975 u32 dram_channels;
989 u32 pixel_period; 976 u32 active_time;
990 u32 line_time = 0; 977 u32 line_time = 0;
991 u32 latency_watermark_a = 0, latency_watermark_b = 0; 978 u32 latency_watermark_a = 0, latency_watermark_b = 0;
992 u32 priority_a_mark = 0, priority_b_mark = 0; 979 u32 priority_a_mark = 0, priority_b_mark = 0;
@@ -996,8 +983,8 @@ static void dce_v6_0_program_watermarks(struct amdgpu_device *adev,
996 fixed20_12 a, b, c; 983 fixed20_12 a, b, c;
997 984
998 if (amdgpu_crtc->base.enabled && num_heads && mode) { 985 if (amdgpu_crtc->base.enabled && num_heads && mode) {
999 pixel_period = 1000000 / (u32)mode->clock; 986 active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
1000 line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535); 987 line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
1001 priority_a_cnt = 0; 988 priority_a_cnt = 0;
1002 priority_b_cnt = 0; 989 priority_b_cnt = 0;
1003 990
@@ -1016,7 +1003,7 @@ static void dce_v6_0_program_watermarks(struct amdgpu_device *adev,
1016 1003
1017 wm_high.disp_clk = mode->clock; 1004 wm_high.disp_clk = mode->clock;
1018 wm_high.src_width = mode->crtc_hdisplay; 1005 wm_high.src_width = mode->crtc_hdisplay;
1019 wm_high.active_time = mode->crtc_hdisplay * pixel_period; 1006 wm_high.active_time = active_time;
1020 wm_high.blank_time = line_time - wm_high.active_time; 1007 wm_high.blank_time = line_time - wm_high.active_time;
1021 wm_high.interlaced = false; 1008 wm_high.interlaced = false;
1022 if (mode->flags & DRM_MODE_FLAG_INTERLACE) 1009 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
@@ -1043,7 +1030,7 @@ static void dce_v6_0_program_watermarks(struct amdgpu_device *adev,
1043 1030
1044 wm_low.disp_clk = mode->clock; 1031 wm_low.disp_clk = mode->clock;
1045 wm_low.src_width = mode->crtc_hdisplay; 1032 wm_low.src_width = mode->crtc_hdisplay;
1046 wm_low.active_time = mode->crtc_hdisplay * pixel_period; 1033 wm_low.active_time = active_time;
1047 wm_low.blank_time = line_time - wm_low.active_time; 1034 wm_low.blank_time = line_time - wm_low.active_time;
1048 wm_low.interlaced = false; 1035 wm_low.interlaced = false;
1049 if (mode->flags & DRM_MODE_FLAG_INTERLACE) 1036 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
@@ -1998,7 +1985,8 @@ static void dce_v6_0_cursor_reset(struct drm_crtc *crtc)
1998} 1985}
1999 1986
2000static int dce_v6_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, 1987static int dce_v6_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
2001 u16 *blue, uint32_t size) 1988 u16 *blue, uint32_t size,
1989 struct drm_modeset_acquire_ctx *ctx)
2002{ 1990{
2003 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 1991 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2004 int i; 1992 int i;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index 1b0717b11efe..6df7a28e8aac 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -974,23 +974,10 @@ static u32 dce_v8_0_latency_watermark(struct dce8_wm_params *wm)
974 a.full = dfixed_const(available_bandwidth); 974 a.full = dfixed_const(available_bandwidth);
975 b.full = dfixed_const(wm->num_heads); 975 b.full = dfixed_const(wm->num_heads);
976 a.full = dfixed_div(a, b); 976 a.full = dfixed_div(a, b);
977 tmp = div_u64((u64) dmif_size * (u64) wm->disp_clk, mc_latency + 512);
978 tmp = min(dfixed_trunc(a), tmp);
977 979
978 b.full = dfixed_const(mc_latency + 512); 980 lb_fill_bw = min(tmp, wm->disp_clk * wm->bytes_per_pixel / 1000);
979 c.full = dfixed_const(wm->disp_clk);
980 b.full = dfixed_div(b, c);
981
982 c.full = dfixed_const(dmif_size);
983 b.full = dfixed_div(c, b);
984
985 tmp = min(dfixed_trunc(a), dfixed_trunc(b));
986
987 b.full = dfixed_const(1000);
988 c.full = dfixed_const(wm->disp_clk);
989 b.full = dfixed_div(c, b);
990 c.full = dfixed_const(wm->bytes_per_pixel);
991 b.full = dfixed_mul(b, c);
992
993 lb_fill_bw = min(tmp, dfixed_trunc(b));
994 981
995 a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel); 982 a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
996 b.full = dfixed_const(1000); 983 b.full = dfixed_const(1000);
@@ -1098,14 +1085,14 @@ static void dce_v8_0_program_watermarks(struct amdgpu_device *adev,
1098{ 1085{
1099 struct drm_display_mode *mode = &amdgpu_crtc->base.mode; 1086 struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
1100 struct dce8_wm_params wm_low, wm_high; 1087 struct dce8_wm_params wm_low, wm_high;
1101 u32 pixel_period; 1088 u32 active_time;
1102 u32 line_time = 0; 1089 u32 line_time = 0;
1103 u32 latency_watermark_a = 0, latency_watermark_b = 0; 1090 u32 latency_watermark_a = 0, latency_watermark_b = 0;
1104 u32 tmp, wm_mask, lb_vblank_lead_lines = 0; 1091 u32 tmp, wm_mask, lb_vblank_lead_lines = 0;
1105 1092
1106 if (amdgpu_crtc->base.enabled && num_heads && mode) { 1093 if (amdgpu_crtc->base.enabled && num_heads && mode) {
1107 pixel_period = 1000000 / (u32)mode->clock; 1094 active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
1108 line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535); 1095 line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
1109 1096
1110 /* watermark for high clocks */ 1097 /* watermark for high clocks */
1111 if (adev->pm.dpm_enabled) { 1098 if (adev->pm.dpm_enabled) {
@@ -1120,7 +1107,7 @@ static void dce_v8_0_program_watermarks(struct amdgpu_device *adev,
1120 1107
1121 wm_high.disp_clk = mode->clock; 1108 wm_high.disp_clk = mode->clock;
1122 wm_high.src_width = mode->crtc_hdisplay; 1109 wm_high.src_width = mode->crtc_hdisplay;
1123 wm_high.active_time = mode->crtc_hdisplay * pixel_period; 1110 wm_high.active_time = active_time;
1124 wm_high.blank_time = line_time - wm_high.active_time; 1111 wm_high.blank_time = line_time - wm_high.active_time;
1125 wm_high.interlaced = false; 1112 wm_high.interlaced = false;
1126 if (mode->flags & DRM_MODE_FLAG_INTERLACE) 1113 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
@@ -1159,7 +1146,7 @@ static void dce_v8_0_program_watermarks(struct amdgpu_device *adev,
1159 1146
1160 wm_low.disp_clk = mode->clock; 1147 wm_low.disp_clk = mode->clock;
1161 wm_low.src_width = mode->crtc_hdisplay; 1148 wm_low.src_width = mode->crtc_hdisplay;
1162 wm_low.active_time = mode->crtc_hdisplay * pixel_period; 1149 wm_low.active_time = active_time;
1163 wm_low.blank_time = line_time - wm_low.active_time; 1150 wm_low.blank_time = line_time - wm_low.active_time;
1164 wm_low.interlaced = false; 1151 wm_low.interlaced = false;
1165 if (mode->flags & DRM_MODE_FLAG_INTERLACE) 1152 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
@@ -2482,7 +2469,8 @@ static void dce_v8_0_cursor_reset(struct drm_crtc *crtc)
2482} 2469}
2483 2470
2484static int dce_v8_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, 2471static int dce_v8_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
2485 u16 *blue, uint32_t size) 2472 u16 *blue, uint32_t size,
2473 struct drm_modeset_acquire_ctx *ctx)
2486{ 2474{
2487 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2475 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2488 int i; 2476 int i;
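
The dce_v8_0 watermark hunk replaces a chain of 20.12 fixed-point helpers with one div_u64() call. A self-contained sketch of the equivalent arithmetic, with a hand-rolled min() and illustrative inputs (the driver derives the real ones from the mode and clocks):

#include <stdint.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's div_u64() helper. */
static uint64_t div_u64(uint64_t dividend, uint32_t divisor)
{
        return dividend / divisor;
}

int main(void)
{
        /* Illustrative inputs. */
        uint32_t dmif_size = 12288, disp_clk = 600000, mc_latency = 2000;
        uint32_t bytes_per_pixel = 4, bw_per_head = 800000;

        /* dmif_size * disp_clk overflows 32 bits, hence the single 64-bit
         * divide that replaces the old fixed-point chain. */
        uint32_t tmp = (uint32_t)div_u64((uint64_t)dmif_size * disp_clk,
                                         mc_latency + 512);
        if (tmp > bw_per_head)
                tmp = bw_per_head;

        uint32_t fill = disp_clk * bytes_per_pixel / 1000;
        uint32_t lb_fill_bw = tmp < fill ? tmp : fill;

        printf("lb_fill_bw = %u\n", lb_fill_bw);
        return 0;
}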
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
index 5c51f9a97811..81a24b6b4846 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
@@ -165,7 +165,8 @@ static void dce_virtual_bandwidth_update(struct amdgpu_device *adev)
165} 165}
166 166
167static int dce_virtual_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, 167static int dce_virtual_crtc_gamma_set(struct drm_crtc *crtc, u16 *red,
168 u16 *green, u16 *blue, uint32_t size) 168 u16 *green, u16 *blue, uint32_t size,
169 struct drm_modeset_acquire_ctx *ctx)
169{ 170{
170 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 171 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
171 int i; 172 int i;
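
All three gamma_set hunks make the same mechanical change: the DRM core now passes a struct drm_modeset_acquire_ctx * through the .gamma_set CRTC callback, so a driver that needs additional modeset locks can join the caller's acquire sequence instead of starting its own. A stub of the updated callback shape, with the DRM types left opaque for illustration:

#include <stdint.h>
#include <stddef.h>

/* Opaque stand-ins for the DRM types; illustration only. */
struct drm_crtc;
struct drm_modeset_acquire_ctx;

static int example_crtc_gamma_set(struct drm_crtc *crtc,
                                  uint16_t *red, uint16_t *green,
                                  uint16_t *blue, uint32_t size,
                                  struct drm_modeset_acquire_ctx *ctx)
{
        (void)crtc; (void)red; (void)green; (void)blue; (void)size; (void)ctx;
        /* Program the hardware LUT here, as the dce_v*_0 versions do. */
        return 0;
}

int main(void)
{
        return example_crtc_gamma_set(NULL, NULL, NULL, NULL, 0, NULL);
}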
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index e0fa0d30e162..dad8a4cd1b37 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -4565,6 +4565,7 @@ static void gfx_v8_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
4565 WREG32(mmCP_MEC_CNTL, (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK)); 4565 WREG32(mmCP_MEC_CNTL, (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
4566 for (i = 0; i < adev->gfx.num_compute_rings; i++) 4566 for (i = 0; i < adev->gfx.num_compute_rings; i++)
4567 adev->gfx.compute_ring[i].ready = false; 4567 adev->gfx.compute_ring[i].ready = false;
4568 adev->gfx.kiq.ring.ready = false;
4568 } 4569 }
4569 udelay(50); 4570 udelay(50);
4570} 4571}
@@ -4721,14 +4722,10 @@ static int gfx_v8_0_mqd_init(struct amdgpu_ring *ring)
4721 mqd->cp_hqd_eop_control = tmp; 4722 mqd->cp_hqd_eop_control = tmp;
4722 4723
4723 /* enable doorbell? */ 4724 /* enable doorbell? */
4724 tmp = RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL); 4725 tmp = REG_SET_FIELD(RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL),
4725 4726 CP_HQD_PQ_DOORBELL_CONTROL,
4726 if (ring->use_doorbell) 4727 DOORBELL_EN,
4727 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 4728 ring->use_doorbell ? 1 : 0);
4728 DOORBELL_EN, 1);
4729 else
4730 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
4731 DOORBELL_EN, 0);
4732 4729
4733 mqd->cp_hqd_pq_doorbell_control = tmp; 4730 mqd->cp_hqd_pq_doorbell_control = tmp;
4734 4731
@@ -4816,13 +4813,10 @@ static int gfx_v8_0_kiq_init_register(struct amdgpu_ring *ring)
4816{ 4813{
4817 struct amdgpu_device *adev = ring->adev; 4814 struct amdgpu_device *adev = ring->adev;
4818 struct vi_mqd *mqd = ring->mqd_ptr; 4815 struct vi_mqd *mqd = ring->mqd_ptr;
4819 uint32_t tmp;
4820 int j; 4816 int j;
4821 4817
4822 /* disable wptr polling */ 4818 /* disable wptr polling */
4823 tmp = RREG32(mmCP_PQ_WPTR_POLL_CNTL); 4819 WREG32_FIELD(CP_PQ_WPTR_POLL_CNTL, EN, 0);
4824 tmp = REG_SET_FIELD(tmp, CP_PQ_WPTR_POLL_CNTL, EN, 0);
4825 WREG32(mmCP_PQ_WPTR_POLL_CNTL, tmp);
4826 4820
4827 WREG32(mmCP_HQD_EOP_BASE_ADDR, mqd->cp_hqd_eop_base_addr_lo); 4821 WREG32(mmCP_HQD_EOP_BASE_ADDR, mqd->cp_hqd_eop_base_addr_lo);
4828 WREG32(mmCP_HQD_EOP_BASE_ADDR_HI, mqd->cp_hqd_eop_base_addr_hi); 4822 WREG32(mmCP_HQD_EOP_BASE_ADDR_HI, mqd->cp_hqd_eop_base_addr_hi);
@@ -4834,10 +4828,10 @@ static int gfx_v8_0_kiq_init_register(struct amdgpu_ring *ring)
4834 WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, mqd->cp_hqd_pq_doorbell_control); 4828 WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, mqd->cp_hqd_pq_doorbell_control);
4835 4829
4836 /* disable the queue if it's active */ 4830 /* disable the queue if it's active */
4837 if (RREG32(mmCP_HQD_ACTIVE) & 1) { 4831 if (RREG32(mmCP_HQD_ACTIVE) & CP_HQD_ACTIVE__ACTIVE_MASK) {
4838 WREG32(mmCP_HQD_DEQUEUE_REQUEST, 1); 4832 WREG32(mmCP_HQD_DEQUEUE_REQUEST, 1);
4839 for (j = 0; j < adev->usec_timeout; j++) { 4833 for (j = 0; j < adev->usec_timeout; j++) {
4840 if (!(RREG32(mmCP_HQD_ACTIVE) & 1)) 4834 if (!(RREG32(mmCP_HQD_ACTIVE) & CP_HQD_ACTIVE__ACTIVE_MASK))
4841 break; 4835 break;
4842 udelay(1); 4836 udelay(1);
4843 } 4837 }
@@ -4894,11 +4888,8 @@ static int gfx_v8_0_kiq_init_register(struct amdgpu_ring *ring)
4894 /* activate the queue */ 4888 /* activate the queue */
4895 WREG32(mmCP_HQD_ACTIVE, mqd->cp_hqd_active); 4889 WREG32(mmCP_HQD_ACTIVE, mqd->cp_hqd_active);
4896 4890
4897 if (ring->use_doorbell) { 4891 if (ring->use_doorbell)
4898 tmp = RREG32(mmCP_PQ_STATUS); 4892 WREG32_FIELD(CP_PQ_STATUS, DOORBELL_ENABLE, 1);
4899 tmp = REG_SET_FIELD(tmp, CP_PQ_STATUS, DOORBELL_ENABLE, 1);
4900 WREG32(mmCP_PQ_STATUS, tmp);
4901 }
4902 4893
4903 return 0; 4894 return 0;
4904} 4895}
@@ -5471,19 +5462,18 @@ static void gfx_v8_0_inactive_hqd(struct amdgpu_device *adev,
5471{ 5462{
5472 int i; 5463 int i;
5473 5464
5465 mutex_lock(&adev->srbm_mutex);
5474 vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0); 5466 vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
5475 if (RREG32(mmCP_HQD_ACTIVE) & CP_HQD_ACTIVE__ACTIVE_MASK) { 5467 if (RREG32(mmCP_HQD_ACTIVE) & CP_HQD_ACTIVE__ACTIVE_MASK) {
5476 u32 tmp; 5468 WREG32_FIELD(CP_HQD_DEQUEUE_REQUEST, DEQUEUE_REQ, 2);
5477 tmp = RREG32(mmCP_HQD_DEQUEUE_REQUEST);
5478 tmp = REG_SET_FIELD(tmp, CP_HQD_DEQUEUE_REQUEST,
5479 DEQUEUE_REQ, 2);
5480 WREG32(mmCP_HQD_DEQUEUE_REQUEST, tmp);
5481 for (i = 0; i < adev->usec_timeout; i++) { 5469 for (i = 0; i < adev->usec_timeout; i++) {
5482 if (!(RREG32(mmCP_HQD_ACTIVE) & CP_HQD_ACTIVE__ACTIVE_MASK)) 5470 if (!(RREG32(mmCP_HQD_ACTIVE) & CP_HQD_ACTIVE__ACTIVE_MASK))
5483 break; 5471 break;
5484 udelay(1); 5472 udelay(1);
5485 } 5473 }
5486 } 5474 }
5475 vi_srbm_select(adev, 0, 0, 0, 0);
5476 mutex_unlock(&adev->srbm_mutex);
5487} 5477}
5488 5478
5489static int gfx_v8_0_pre_soft_reset(void *handle) 5479static int gfx_v8_0_pre_soft_reset(void *handle)
@@ -5589,11 +5579,13 @@ static int gfx_v8_0_soft_reset(void *handle)
5589static void gfx_v8_0_init_hqd(struct amdgpu_device *adev, 5579static void gfx_v8_0_init_hqd(struct amdgpu_device *adev,
5590 struct amdgpu_ring *ring) 5580 struct amdgpu_ring *ring)
5591{ 5581{
5582 mutex_lock(&adev->srbm_mutex);
5592 vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0); 5583 vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
5593 WREG32(mmCP_HQD_DEQUEUE_REQUEST, 0); 5584 WREG32(mmCP_HQD_DEQUEUE_REQUEST, 0);
5594 WREG32(mmCP_HQD_PQ_RPTR, 0); 5585 WREG32(mmCP_HQD_PQ_RPTR, 0);
5595 WREG32(mmCP_HQD_PQ_WPTR, 0); 5586 WREG32(mmCP_HQD_PQ_WPTR, 0);
5596 vi_srbm_select(adev, 0, 0, 0, 0); 5587 vi_srbm_select(adev, 0, 0, 0, 0);
5588 mutex_unlock(&adev->srbm_mutex);
5597} 5589}
5598 5590
5599static int gfx_v8_0_post_soft_reset(void *handle) 5591static int gfx_v8_0_post_soft_reset(void *handle)
@@ -6986,40 +6978,24 @@ static int gfx_v8_0_kiq_set_interrupt_state(struct amdgpu_device *adev,
6986 unsigned int type, 6978 unsigned int type,
6987 enum amdgpu_interrupt_state state) 6979 enum amdgpu_interrupt_state state)
6988{ 6980{
6989 uint32_t tmp, target;
6990 struct amdgpu_ring *ring = &(adev->gfx.kiq.ring); 6981 struct amdgpu_ring *ring = &(adev->gfx.kiq.ring);
6991 6982
6992 BUG_ON(ring->funcs->type != AMDGPU_RING_TYPE_KIQ); 6983 BUG_ON(ring->funcs->type != AMDGPU_RING_TYPE_KIQ);
6993 6984
6994 if (ring->me == 1)
6995 target = mmCP_ME1_PIPE0_INT_CNTL;
6996 else
6997 target = mmCP_ME2_PIPE0_INT_CNTL;
6998 target += ring->pipe;
6999
7000 switch (type) { 6985 switch (type) {
7001 case AMDGPU_CP_KIQ_IRQ_DRIVER0: 6986 case AMDGPU_CP_KIQ_IRQ_DRIVER0:
7002 if (state == AMDGPU_IRQ_STATE_DISABLE) { 6987 WREG32_FIELD(CPC_INT_CNTL, GENERIC2_INT_ENABLE,
7003 tmp = RREG32(mmCPC_INT_CNTL); 6988 state == AMDGPU_IRQ_STATE_DISABLE ? 0 : 1);
7004 tmp = REG_SET_FIELD(tmp, CPC_INT_CNTL, 6989 if (ring->me == 1)
7005 GENERIC2_INT_ENABLE, 0); 6990 WREG32_FIELD_OFFSET(CP_ME1_PIPE0_INT_CNTL,
7006 WREG32(mmCPC_INT_CNTL, tmp); 6991 ring->pipe,
7007 6992 GENERIC2_INT_ENABLE,
7008 tmp = RREG32(target); 6993 state == AMDGPU_IRQ_STATE_DISABLE ? 0 : 1);
7009 tmp = REG_SET_FIELD(tmp, CP_ME2_PIPE0_INT_CNTL, 6994 else
7010 GENERIC2_INT_ENABLE, 0); 6995 WREG32_FIELD_OFFSET(CP_ME2_PIPE0_INT_CNTL,
7011 WREG32(target, tmp); 6996 ring->pipe,
7012 } else { 6997 GENERIC2_INT_ENABLE,
7013 tmp = RREG32(mmCPC_INT_CNTL); 6998 state == AMDGPU_IRQ_STATE_DISABLE ? 0 : 1);
7014 tmp = REG_SET_FIELD(tmp, CPC_INT_CNTL,
7015 GENERIC2_INT_ENABLE, 1);
7016 WREG32(mmCPC_INT_CNTL, tmp);
7017
7018 tmp = RREG32(target);
7019 tmp = REG_SET_FIELD(tmp, CP_ME2_PIPE0_INT_CNTL,
7020 GENERIC2_INT_ENABLE, 1);
7021 WREG32(target, tmp);
7022 }
7023 break; 6999 break;
7024 default: 7000 default:
7025 BUG(); /* kiq only supports GENERIC2_INT now */ 7001 BUG(); /* kiq only supports GENERIC2_INT now */
@@ -7159,8 +7135,6 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_kiq = {
7159 .emit_ib_size = 4, /* gfx_v8_0_ring_emit_ib_compute */ 7135 .emit_ib_size = 4, /* gfx_v8_0_ring_emit_ib_compute */
7160 .emit_ib = gfx_v8_0_ring_emit_ib_compute, 7136 .emit_ib = gfx_v8_0_ring_emit_ib_compute,
7161 .emit_fence = gfx_v8_0_ring_emit_fence_kiq, 7137 .emit_fence = gfx_v8_0_ring_emit_fence_kiq,
7162 .emit_hdp_flush = gfx_v8_0_ring_emit_hdp_flush,
7163 .emit_hdp_invalidate = gfx_v8_0_ring_emit_hdp_invalidate,
7164 .test_ring = gfx_v8_0_ring_test_ring, 7138 .test_ring = gfx_v8_0_ring_test_ring,
7165 .test_ib = gfx_v8_0_ring_test_ib, 7139 .test_ib = gfx_v8_0_ring_test_ib,
7166 .insert_nop = amdgpu_ring_insert_nop, 7140 .insert_nop = amdgpu_ring_insert_nop,
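
Most of the gfx_v8_0 churn above is one pattern: an open-coded RREG32 / REG_SET_FIELD / WREG32 triple collapsed into a single WREG32_FIELD() invocation. A userspace model of that read-modify-write macro, with the register map and field layout mocked for illustration:

#include <stdint.h>
#include <stdio.h>

/* Mock MMIO space and field layout, for illustration only. */
static uint32_t regs[16];
#define mmCP_PQ_WPTR_POLL_CNTL 3
#define CP_PQ_WPTR_POLL_CNTL__EN_MASK   0x80000000u
#define CP_PQ_WPTR_POLL_CNTL__EN__SHIFT 31

#define RREG32(reg)    (regs[reg])
#define WREG32(reg, v) (regs[reg] = (v))

/* Read-modify-write of a single field, modeling the kernel's WREG32_FIELD(). */
#define WREG32_FIELD(reg, field, val) \
        WREG32(mm##reg, (RREG32(mm##reg) & ~reg##__##field##_MASK) | \
               ((uint32_t)(val) << reg##__##field##__SHIFT))

int main(void)
{
        regs[mmCP_PQ_WPTR_POLL_CNTL] = 0xffffffffu;
        WREG32_FIELD(CP_PQ_WPTR_POLL_CNTL, EN, 0); /* disable wptr polling */
        printf("0x%08x\n", (unsigned)regs[mmCP_PQ_WPTR_POLL_CNTL]); /* 0x7fffffff */
        return 0;
}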
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 669bb98fc45d..a447b70841c9 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -1288,9 +1288,7 @@ static void gfx_v9_0_gpu_init(struct amdgpu_device *adev)
1288 u32 tmp; 1288 u32 tmp;
1289 int i; 1289 int i;
1290 1290
1291 tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_CNTL)); 1291 WREG32_FIELD15(GC, 0, GRBM_CNTL, READ_TIMEOUT, 0xff);
1292 tmp = REG_SET_FIELD(tmp, GRBM_CNTL, READ_TIMEOUT, 0xff);
1293 WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_CNTL), tmp);
1294 1292
1295 gfx_v9_0_tiling_mode_table_init(adev); 1293 gfx_v9_0_tiling_mode_table_init(adev);
1296 1294
@@ -1395,13 +1393,9 @@ void gfx_v9_0_rlc_stop(struct amdgpu_device *adev)
1395 1393
1396static void gfx_v9_0_rlc_reset(struct amdgpu_device *adev) 1394static void gfx_v9_0_rlc_reset(struct amdgpu_device *adev)
1397{ 1395{
1398 u32 tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_SOFT_RESET)); 1396 WREG32_FIELD15(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
1399
1400 tmp = REG_SET_FIELD(tmp, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
1401 WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_SOFT_RESET), tmp);
1402 udelay(50); 1397 udelay(50);
1403 tmp = REG_SET_FIELD(tmp, GRBM_SOFT_RESET, SOFT_RESET_RLC, 0); 1398 WREG32_FIELD15(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 0);
1404 WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_SOFT_RESET), tmp);
1405 udelay(50); 1399 udelay(50);
1406} 1400}
1407 1401
@@ -1410,10 +1404,8 @@ static void gfx_v9_0_rlc_start(struct amdgpu_device *adev)
1410#ifdef AMDGPU_RLC_DEBUG_RETRY 1404#ifdef AMDGPU_RLC_DEBUG_RETRY
1411 u32 rlc_ucode_ver; 1405 u32 rlc_ucode_ver;
1412#endif 1406#endif
1413 u32 tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CNTL));
1414 1407
1415 tmp = REG_SET_FIELD(tmp, RLC_CNTL, RLC_ENABLE_F32, 1); 1408 WREG32_FIELD15(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 1);
1416 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CNTL), tmp);
1417 1409
1418 /* carrizo enables the cp interrupt after cp is inited */ 1410 /* carrizo enables the cp interrupt after cp is inited */
1419 if (!(adev->flags & AMD_IS_APU)) 1411 if (!(adev->flags & AMD_IS_APU))
@@ -1497,14 +1489,10 @@ static void gfx_v9_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
1497 int i; 1489 int i;
1498 u32 tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_ME_CNTL)); 1490 u32 tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_ME_CNTL));
1499 1491
1500 if (enable) { 1492 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, enable ? 0 : 1);
1501 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, 0); 1493 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, enable ? 0 : 1);
1502 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, 0); 1494 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, enable ? 0 : 1);
1503 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, 0); 1495 if (!enable) {
1504 } else {
1505 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, 1);
1506 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, 1);
1507 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, 1);
1508 for (i = 0; i < adev->gfx.num_gfx_rings; i++) 1496 for (i = 0; i < adev->gfx.num_gfx_rings; i++)
1509 adev->gfx.gfx_ring[i].ready = false; 1497 adev->gfx.gfx_ring[i].ready = false;
1510 } 1498 }
@@ -2020,13 +2008,10 @@ static int gfx_v9_0_kiq_init_register(struct amdgpu_ring *ring)
2020{ 2008{
2021 struct amdgpu_device *adev = ring->adev; 2009 struct amdgpu_device *adev = ring->adev;
2022 struct v9_mqd *mqd = ring->mqd_ptr; 2010 struct v9_mqd *mqd = ring->mqd_ptr;
2023 uint32_t tmp;
2024 int j; 2011 int j;
2025 2012
2026 /* disable wptr polling */ 2013 /* disable wptr polling */
2027 tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_PQ_WPTR_POLL_CNTL)); 2014 WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);
2028 tmp = REG_SET_FIELD(tmp, CP_PQ_WPTR_POLL_CNTL, EN, 0);
2029 WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_PQ_WPTR_POLL_CNTL), tmp);
2030 2015
2031 WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_EOP_BASE_ADDR), 2016 WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_EOP_BASE_ADDR),
2032 mqd->cp_hqd_eop_base_addr_lo); 2017 mqd->cp_hqd_eop_base_addr_lo);
@@ -2118,11 +2103,8 @@ static int gfx_v9_0_kiq_init_register(struct amdgpu_ring *ring)
2118 WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_ACTIVE), 2103 WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_ACTIVE),
2119 mqd->cp_hqd_active); 2104 mqd->cp_hqd_active);
2120 2105
2121 if (ring->use_doorbell) { 2106 if (ring->use_doorbell)
2122 tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_PQ_STATUS)); 2107 WREG32_FIELD15(GC, 0, CP_PQ_STATUS, DOORBELL_ENABLE, 1);
2123 tmp = REG_SET_FIELD(tmp, CP_PQ_STATUS, DOORBELL_ENABLE, 1);
2124 WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_PQ_STATUS), tmp);
2125 }
2126 2108
2127 return 0; 2109 return 0;
2128} 2110}
@@ -2366,177 +2348,6 @@ static int gfx_v9_0_wait_for_idle(void *handle)
2366 return -ETIMEDOUT; 2348 return -ETIMEDOUT;
2367} 2349}
2368 2350
2369static void gfx_v9_0_print_status(void *handle)
2370{
2371 int i;
2372 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2373
2374 dev_info(adev->dev, "GFX 9.x registers\n");
2375 dev_info(adev->dev, " GRBM_STATUS=0x%08X\n",
2376 RREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS)));
2377 dev_info(adev->dev, " GRBM_STATUS2=0x%08X\n",
2378 RREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS2)));
2379 dev_info(adev->dev, " GRBM_STATUS_SE0=0x%08X\n",
2380 RREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS_SE0)));
2381 dev_info(adev->dev, " GRBM_STATUS_SE1=0x%08X\n",
2382 RREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS_SE1)));
2383 dev_info(adev->dev, " GRBM_STATUS_SE2=0x%08X\n",
2384 RREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS_SE2)));
2385 dev_info(adev->dev, " GRBM_STATUS_SE3=0x%08X\n",
2386 RREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS_SE3)));
2387 dev_info(adev->dev, " CP_STAT = 0x%08x\n", RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_STAT)));
2388 dev_info(adev->dev, " CP_STALLED_STAT1 = 0x%08x\n",
2389 RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_STALLED_STAT1)));
2390 dev_info(adev->dev, " CP_STALLED_STAT2 = 0x%08x\n",
2391 RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_STALLED_STAT2)));
2392 dev_info(adev->dev, " CP_STALLED_STAT3 = 0x%08x\n",
2393 RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_STALLED_STAT3)));
2394 dev_info(adev->dev, " CP_CPF_BUSY_STAT = 0x%08x\n",
2395 RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_CPF_BUSY_STAT)));
2396 dev_info(adev->dev, " CP_CPF_STALLED_STAT1 = 0x%08x\n",
2397 RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_CPF_STALLED_STAT1)));
2398 dev_info(adev->dev, " CP_CPF_STATUS = 0x%08x\n", RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_CPF_STATUS)));
2399 dev_info(adev->dev, " CP_CPC_BUSY_STAT = 0x%08x\n", RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_CPC_BUSY_STAT)));
2400 dev_info(adev->dev, " CP_CPC_STALLED_STAT1 = 0x%08x\n",
2401 RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_CPC_STALLED_STAT1)));
2402 dev_info(adev->dev, " CP_CPC_STATUS = 0x%08x\n", RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_CPC_STATUS)));
2403
2404 for (i = 0; i < 32; i++) {
2405 dev_info(adev->dev, " GB_TILE_MODE%d=0x%08X\n",
2406 i, RREG32(SOC15_REG_OFFSET(GC, 0, mmGB_TILE_MODE0 ) + i*4));
2407 }
2408 for (i = 0; i < 16; i++) {
2409 dev_info(adev->dev, " GB_MACROTILE_MODE%d=0x%08X\n",
2410 i, RREG32(SOC15_REG_OFFSET(GC, 0, mmGB_MACROTILE_MODE0) + i*4));
2411 }
2412 for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
2413 dev_info(adev->dev, " se: %d\n", i);
2414 gfx_v9_0_select_se_sh(adev, i, 0xffffffff, 0xffffffff);
2415 dev_info(adev->dev, " PA_SC_RASTER_CONFIG=0x%08X\n",
2416 RREG32(SOC15_REG_OFFSET(GC, 0, mmPA_SC_RASTER_CONFIG)));
2417 dev_info(adev->dev, " PA_SC_RASTER_CONFIG_1=0x%08X\n",
2418 RREG32(SOC15_REG_OFFSET(GC, 0, mmPA_SC_RASTER_CONFIG_1)));
2419 }
2420 gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
2421
2422 dev_info(adev->dev, " GB_ADDR_CONFIG=0x%08X\n",
2423 RREG32(SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG)));
2424
2425 dev_info(adev->dev, " CP_MEQ_THRESHOLDS=0x%08X\n",
2426 RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_MEQ_THRESHOLDS)));
2427 dev_info(adev->dev, " SX_DEBUG_1=0x%08X\n",
2428 RREG32(SOC15_REG_OFFSET(GC, 0, mmSX_DEBUG_1)));
2429 dev_info(adev->dev, " TA_CNTL_AUX=0x%08X\n",
2430 RREG32(SOC15_REG_OFFSET(GC, 0, mmTA_CNTL_AUX)));
2431 dev_info(adev->dev, " SPI_CONFIG_CNTL=0x%08X\n",
2432 RREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_CONFIG_CNTL)));
2433 dev_info(adev->dev, " SQ_CONFIG=0x%08X\n",
2434 RREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_CONFIG)));
2435 dev_info(adev->dev, " DB_DEBUG=0x%08X\n",
2436 RREG32(SOC15_REG_OFFSET(GC, 0, mmDB_DEBUG)));
2437 dev_info(adev->dev, " DB_DEBUG2=0x%08X\n",
2438 RREG32(SOC15_REG_OFFSET(GC, 0, mmDB_DEBUG2)));
2439 dev_info(adev->dev, " DB_DEBUG3=0x%08X\n",
2440 RREG32(SOC15_REG_OFFSET(GC, 0, mmDB_DEBUG3)));
2441 dev_info(adev->dev, " CB_HW_CONTROL=0x%08X\n",
2442 RREG32(SOC15_REG_OFFSET(GC, 0, mmCB_HW_CONTROL)));
2443 dev_info(adev->dev, " SPI_CONFIG_CNTL_1=0x%08X\n",
2444 RREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_CONFIG_CNTL_1)));
2445 dev_info(adev->dev, " PA_SC_FIFO_SIZE=0x%08X\n",
2446 RREG32(SOC15_REG_OFFSET(GC, 0, mmPA_SC_FIFO_SIZE)));
2447 dev_info(adev->dev, " VGT_NUM_INSTANCES=0x%08X\n",
2448 RREG32(SOC15_REG_OFFSET(GC, 0, mmVGT_NUM_INSTANCES)));
2449 dev_info(adev->dev, " CP_PERFMON_CNTL=0x%08X\n",
2450 RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_PERFMON_CNTL)));
2451 dev_info(adev->dev, " PA_SC_FORCE_EOV_MAX_CNTS=0x%08X\n",
2452 RREG32(SOC15_REG_OFFSET(GC, 0, mmPA_SC_FORCE_EOV_MAX_CNTS)));
2453 dev_info(adev->dev, " VGT_CACHE_INVALIDATION=0x%08X\n",
2454 RREG32(SOC15_REG_OFFSET(GC, 0, mmVGT_CACHE_INVALIDATION)));
2455 dev_info(adev->dev, " VGT_GS_VERTEX_REUSE=0x%08X\n",
2456 RREG32(SOC15_REG_OFFSET(GC, 0, mmVGT_GS_VERTEX_REUSE)));
2457 dev_info(adev->dev, " PA_SC_LINE_STIPPLE_STATE=0x%08X\n",
2458 RREG32(SOC15_REG_OFFSET(GC, 0, mmPA_SC_LINE_STIPPLE_STATE)));
2459 dev_info(adev->dev, " PA_CL_ENHANCE=0x%08X\n",
2460 RREG32(SOC15_REG_OFFSET(GC, 0, mmPA_CL_ENHANCE)));
2461 dev_info(adev->dev, " PA_SC_ENHANCE=0x%08X\n",
2462 RREG32(SOC15_REG_OFFSET(GC, 0, mmPA_SC_ENHANCE)));
2463
2464 dev_info(adev->dev, " CP_ME_CNTL=0x%08X\n",
2465 RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_ME_CNTL)));
2466 dev_info(adev->dev, " CP_MAX_CONTEXT=0x%08X\n",
2467 RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_MAX_CONTEXT)));
2468 dev_info(adev->dev, " CP_DEVICE_ID=0x%08X\n",
2469 RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_DEVICE_ID)));
2470
2471 dev_info(adev->dev, " CP_SEM_WAIT_TIMER=0x%08X\n",
2472 RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_SEM_WAIT_TIMER)));
2473
2474 dev_info(adev->dev, " CP_RB_WPTR_DELAY=0x%08X\n",
2475 RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB_WPTR_DELAY)));
2476 dev_info(adev->dev, " CP_RB_VMID=0x%08X\n",
2477 RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB_VMID)));
2478 dev_info(adev->dev, " CP_RB0_CNTL=0x%08X\n",
2479 RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB0_CNTL)));
2480 dev_info(adev->dev, " CP_RB0_WPTR=0x%08X\n",
2481 RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB0_WPTR)));
2482 dev_info(adev->dev, " CP_RB0_RPTR_ADDR=0x%08X\n",
2483 RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB0_RPTR_ADDR)));
2484 dev_info(adev->dev, " CP_RB0_RPTR_ADDR_HI=0x%08X\n",
2485 RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB0_RPTR_ADDR_HI)));
2486 dev_info(adev->dev, " CP_RB0_CNTL=0x%08X\n",
2487 RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB0_CNTL)));
2488 dev_info(adev->dev, " CP_RB0_BASE=0x%08X\n",
2489 RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB0_BASE)));
2490 dev_info(adev->dev, " CP_RB0_BASE_HI=0x%08X\n",
2491 RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB0_BASE_HI)));
2492 dev_info(adev->dev, " CP_MEC_CNTL=0x%08X\n",
2493 RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_MEC_CNTL)));
2494
2495 dev_info(adev->dev, " SCRATCH_ADDR=0x%08X\n",
2496 RREG32(SOC15_REG_OFFSET(GC, 0, mmSCRATCH_ADDR)));
2497 dev_info(adev->dev, " SCRATCH_UMSK=0x%08X\n",
2498 RREG32(SOC15_REG_OFFSET(GC, 0, mmSCRATCH_UMSK)));
2499
2500 dev_info(adev->dev, " CP_INT_CNTL_RING0=0x%08X\n",
2501 RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_INT_CNTL_RING0)));
2502 dev_info(adev->dev, " RLC_LB_CNTL=0x%08X\n",
2503 RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_LB_CNTL)));
2504 dev_info(adev->dev, " RLC_CNTL=0x%08X\n",
2505 RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CNTL)));
2506 dev_info(adev->dev, " RLC_CGCG_CGLS_CTRL=0x%08X\n",
2507 RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CGCG_CGLS_CTRL)));
2508 dev_info(adev->dev, " RLC_LB_CNTR_INIT=0x%08X\n",
2509 RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_LB_CNTR_INIT)));
2510 dev_info(adev->dev, " RLC_LB_CNTR_MAX=0x%08X\n",
2511 RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_LB_CNTR_MAX)));
2512 dev_info(adev->dev, " RLC_LB_INIT_CU_MASK=0x%08X\n",
2513 RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_LB_INIT_CU_MASK)));
2514 dev_info(adev->dev, " RLC_LB_PARAMS=0x%08X\n",
2515 RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_LB_PARAMS)));
2516 dev_info(adev->dev, " RLC_LB_CNTL=0x%08X\n",
2517 RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_LB_CNTL)));
2518 dev_info(adev->dev, " RLC_UCODE_CNTL=0x%08X\n",
2519 RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_UCODE_CNTL)));
2520
2521 dev_info(adev->dev, " RLC_GPM_GENERAL_6=0x%08X\n",
2522 RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_GENERAL_6)));
2523 dev_info(adev->dev, " RLC_GPM_GENERAL_12=0x%08X\n",
2524 RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_GENERAL_12)));
2525 dev_info(adev->dev, " RLC_GPM_TIMER_INT_3=0x%08X\n",
2526 RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_TIMER_INT_3)));
2527 mutex_lock(&adev->srbm_mutex);
2528 for (i = 0; i < 16; i++) {
2529 soc15_grbm_select(adev, 0, 0, 0, i);
2530 dev_info(adev->dev, " VM %d:\n", i);
2531 dev_info(adev->dev, " SH_MEM_CONFIG=0x%08X\n",
2532 RREG32(SOC15_REG_OFFSET(GC, 0, mmSH_MEM_CONFIG)));
2533 dev_info(adev->dev, " SH_MEM_BASES=0x%08X\n",
2534 RREG32(SOC15_REG_OFFSET(GC, 0, mmSH_MEM_BASES)));
2535 }
2536 soc15_grbm_select(adev, 0, 0, 0, 0);
2537 mutex_unlock(&adev->srbm_mutex);
2538}
2539
2540static int gfx_v9_0_soft_reset(void *handle) 2351static int gfx_v9_0_soft_reset(void *handle)
2541{ 2352{
2542 u32 grbm_soft_reset = 0; 2353 u32 grbm_soft_reset = 0;
@@ -2569,8 +2380,7 @@ static int gfx_v9_0_soft_reset(void *handle)
2569 GRBM_SOFT_RESET, SOFT_RESET_RLC, 1); 2380 GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
2570 2381
2571 2382
2572 if (grbm_soft_reset ) { 2383 if (grbm_soft_reset) {
2573 gfx_v9_0_print_status((void *)adev);
2574 /* stop the rlc */ 2384 /* stop the rlc */
2575 gfx_v9_0_rlc_stop(adev); 2385 gfx_v9_0_rlc_stop(adev);
2576 2386
@@ -2596,7 +2406,6 @@ static int gfx_v9_0_soft_reset(void *handle)
2596 2406
2597 /* Wait a little for things to settle down */ 2407 /* Wait a little for things to settle down */
2598 udelay(50); 2408 udelay(50);
2599 gfx_v9_0_print_status((void *)adev);
2600 } 2409 }
2601 return 0; 2410 return 0;
2602} 2411}
@@ -3148,6 +2957,7 @@ static void gfx_v9_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
3148 unsigned vm_id, uint64_t pd_addr) 2957 unsigned vm_id, uint64_t pd_addr)
3149{ 2958{
3150 int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX); 2959 int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
2960 uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
3151 unsigned eng = ring->idx; 2961 unsigned eng = ring->idx;
3152 unsigned i; 2962 unsigned i;
3153 2963
@@ -3157,7 +2967,6 @@ static void gfx_v9_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
3157 2967
3158 for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) { 2968 for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
3159 struct amdgpu_vmhub *hub = &ring->adev->vmhub[i]; 2969 struct amdgpu_vmhub *hub = &ring->adev->vmhub[i];
3160 uint32_t req = hub->get_invalidate_req(vm_id);
3161 2970
3162 gfx_v9_0_write_data_to_reg(ring, usepfp, true, 2971 gfx_v9_0_write_data_to_reg(ring, usepfp, true,
3163 hub->ctx0_ptb_addr_lo32 2972 hub->ctx0_ptb_addr_lo32
@@ -3376,21 +3185,12 @@ static void gfx_v9_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
3376static void gfx_v9_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev, 3185static void gfx_v9_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
3377 enum amdgpu_interrupt_state state) 3186 enum amdgpu_interrupt_state state)
3378{ 3187{
3379 u32 cp_int_cntl;
3380
3381 switch (state) { 3188 switch (state) {
3382 case AMDGPU_IRQ_STATE_DISABLE: 3189 case AMDGPU_IRQ_STATE_DISABLE:
3383 cp_int_cntl = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_INT_CNTL_RING0));
3384 cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
3385 TIME_STAMP_INT_ENABLE, 0);
3386 WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_INT_CNTL_RING0), cp_int_cntl);
3387 break;
3388 case AMDGPU_IRQ_STATE_ENABLE: 3190 case AMDGPU_IRQ_STATE_ENABLE:
3389 cp_int_cntl = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_INT_CNTL_RING0)); 3191 WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
3390 cp_int_cntl = 3192 TIME_STAMP_INT_ENABLE,
3391 REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, 3193 state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
3392 TIME_STAMP_INT_ENABLE, 1);
3393 WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_INT_CNTL_RING0), cp_int_cntl);
3394 break; 3194 break;
3395 default: 3195 default:
3396 break; 3196 break;
@@ -3446,20 +3246,12 @@ static int gfx_v9_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
3446 unsigned type, 3246 unsigned type,
3447 enum amdgpu_interrupt_state state) 3247 enum amdgpu_interrupt_state state)
3448{ 3248{
3449 u32 cp_int_cntl;
3450
3451 switch (state) { 3249 switch (state) {
3452 case AMDGPU_IRQ_STATE_DISABLE: 3250 case AMDGPU_IRQ_STATE_DISABLE:
3453 cp_int_cntl = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_INT_CNTL_RING0));
3454 cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
3455 PRIV_REG_INT_ENABLE, 0);
3456 WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_INT_CNTL_RING0), cp_int_cntl);
3457 break;
3458 case AMDGPU_IRQ_STATE_ENABLE: 3251 case AMDGPU_IRQ_STATE_ENABLE:
3459 cp_int_cntl = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_INT_CNTL_RING0)); 3252 WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
3460 cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, 3253 PRIV_REG_INT_ENABLE,
3461 PRIV_REG_INT_ENABLE, 1); 3254 state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
3462 WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_INT_CNTL_RING0), cp_int_cntl);
3463 break; 3255 break;
3464 default: 3256 default:
3465 break; 3257 break;
@@ -3473,21 +3265,12 @@ static int gfx_v9_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
3473 unsigned type, 3265 unsigned type,
3474 enum amdgpu_interrupt_state state) 3266 enum amdgpu_interrupt_state state)
3475{ 3267{
3476 u32 cp_int_cntl;
3477
3478 switch (state) { 3268 switch (state) {
3479 case AMDGPU_IRQ_STATE_DISABLE: 3269 case AMDGPU_IRQ_STATE_DISABLE:
3480 cp_int_cntl = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_INT_CNTL_RING0));
3481 cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
3482 PRIV_INSTR_INT_ENABLE, 0);
3483 WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_INT_CNTL_RING0), cp_int_cntl);
3484 break;
3485 case AMDGPU_IRQ_STATE_ENABLE: 3270 case AMDGPU_IRQ_STATE_ENABLE:
3486 cp_int_cntl = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_INT_CNTL_RING0)); 3271 WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
3487 cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, 3272 PRIV_INSTR_INT_ENABLE,
3488 PRIV_INSTR_INT_ENABLE, 1); 3273 state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
3489 WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_INT_CNTL_RING0), cp_int_cntl);
3490 break;
3491 default: 3274 default:
3492 break; 3275 break;
3493 } 3276 }
@@ -3759,8 +3542,6 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = {
3759 .emit_ib_size = 4, /* gfx_v9_0_ring_emit_ib_compute */ 3542 .emit_ib_size = 4, /* gfx_v9_0_ring_emit_ib_compute */
3760 .emit_ib = gfx_v9_0_ring_emit_ib_compute, 3543 .emit_ib = gfx_v9_0_ring_emit_ib_compute,
3761 .emit_fence = gfx_v9_0_ring_emit_fence_kiq, 3544 .emit_fence = gfx_v9_0_ring_emit_fence_kiq,
3762 .emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush,
3763 .emit_hdp_invalidate = gfx_v9_0_ring_emit_hdp_invalidate,
3764 .test_ring = gfx_v9_0_ring_test_ring, 3545 .test_ring = gfx_v9_0_ring_test_ring,
3765 .test_ib = gfx_v9_0_ring_test_ib, 3546 .test_ib = gfx_v9_0_ring_test_ib,
3766 .insert_nop = amdgpu_ring_insert_nop, 3547 .insert_nop = amdgpu_ring_insert_nop,
@@ -3975,9 +3756,7 @@ static int gfx_v9_0_init_queue(struct amdgpu_ring *ring)
3975 ring->pipe, 3756 ring->pipe,
3976 ring->queue, 0); 3757 ring->queue, 0);
3977 /* disable wptr polling */ 3758 /* disable wptr polling */
3978 tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_PQ_WPTR_POLL_CNTL)); 3759 WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);
3979 tmp = REG_SET_FIELD(tmp, CP_PQ_WPTR_POLL_CNTL, EN, 0);
3980 WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_PQ_WPTR_POLL_CNTL), tmp);
3981 3760
3982 /* write the EOP addr */ 3761 /* write the EOP addr */
3983 BUG_ON(ring->me != 1 || ring->pipe != 0); /* can't handle other cases eop address */ 3762 BUG_ON(ring->me != 1 || ring->pipe != 0); /* can't handle other cases eop address */
@@ -4121,11 +3900,8 @@ static int gfx_v9_0_init_queue(struct amdgpu_ring *ring)
4121 amdgpu_bo_kunmap(ring->mqd_obj); 3900 amdgpu_bo_kunmap(ring->mqd_obj);
4122 amdgpu_bo_unreserve(ring->mqd_obj); 3901 amdgpu_bo_unreserve(ring->mqd_obj);
4123 3902
4124 if (use_doorbell) { 3903 if (use_doorbell)
4125 tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_PQ_STATUS)); 3904 WREG32_FIELD15(GC, 0, CP_PQ_STATUS, DOORBELL_ENABLE, 1);
4126 tmp = REG_SET_FIELD(tmp, CP_PQ_STATUS, DOORBELL_ENABLE, 1);
4127 WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_PQ_STATUS), tmp);
4128 }
4129 3905
4130 return 0; 3906 return 0;
4131} 3907}
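
The gfx_v9_0 interrupt-state hunks fold the duplicated ENABLE/DISABLE switch arms into a single field write driven by a ternary. A compilable model of the pattern (register and bit position are mock values):

#include <stdint.h>
#include <stdio.h>

enum irq_state { IRQ_STATE_DISABLE, IRQ_STATE_ENABLE };

/* Mock register and an illustrative field position. */
static uint32_t cp_int_cntl_ring0;
#define TIME_STAMP_INT_ENABLE_SHIFT 26

static void write_time_stamp_int_enable(uint32_t en)
{
        cp_int_cntl_ring0 = (cp_int_cntl_ring0 & ~(1u << TIME_STAMP_INT_ENABLE_SHIFT))
                          | (en << TIME_STAMP_INT_ENABLE_SHIFT);
}

/* Both switch arms share one write; the ternary picks the bit value,
 * which is what lets the patch delete the duplicated DISABLE branch. */
static void set_gfx_eop_interrupt_state(enum irq_state state)
{
        switch (state) {
        case IRQ_STATE_DISABLE:
        case IRQ_STATE_ENABLE:
                write_time_stamp_int_enable(state == IRQ_STATE_ENABLE ? 1 : 0);
                break;
        default:
                break;
        }
}

int main(void)
{
        set_gfx_eop_interrupt_state(IRQ_STATE_ENABLE);
        printf("0x%08x\n", (unsigned)cp_int_cntl_ring0);
        set_gfx_eop_interrupt_state(IRQ_STATE_DISABLE);
        printf("0x%08x\n", (unsigned)cp_int_cntl_ring0);
        return 0;
}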
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
index 30ef3126c8a9..005075ff00f7 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
@@ -222,7 +222,7 @@ int gfxhub_v1_0_gart_enable(struct amdgpu_device *adev)
222 EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1); 222 EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
223 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, 223 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
224 PAGE_TABLE_BLOCK_SIZE, 224 PAGE_TABLE_BLOCK_SIZE,
225 amdgpu_vm_block_size - 9); 225 adev->vm_manager.block_size - 9);
226 WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT1_CNTL) + i, tmp); 226 WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT1_CNTL) + i, tmp);
227 WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32) + i*2, 0); 227 WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32) + i*2, 0);
228 WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32) + i*2, 0); 228 WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32) + i*2, 0);
@@ -299,36 +299,6 @@ void gfxhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev,
299 WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_L2_PROTECTION_FAULT_CNTL), tmp); 299 WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_L2_PROTECTION_FAULT_CNTL), tmp);
300} 300}
301 301
302static uint32_t gfxhub_v1_0_get_invalidate_req(unsigned int vm_id)
303{
304 u32 req = 0;
305
306 /* invalidate using legacy mode on vm_id*/
307 req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
308 PER_VMID_INVALIDATE_REQ, 1 << vm_id);
309 req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, 0);
310 req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
311 req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
312 req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
313 req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
314 req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
315 req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
316 CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0);
317
318 return req;
319}
320
321static uint32_t gfxhub_v1_0_get_vm_protection_bits(void)
322{
323 return (VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
324 VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
325 VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
326 VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
327 VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
328 VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
329 VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK);
330}
331
332static int gfxhub_v1_0_early_init(void *handle) 302static int gfxhub_v1_0_early_init(void *handle)
333{ 303{
334 return 0; 304 return 0;
@@ -361,9 +331,6 @@ static int gfxhub_v1_0_sw_init(void *handle)
361 hub->vm_l2_pro_fault_cntl = 331 hub->vm_l2_pro_fault_cntl =
362 SOC15_REG_OFFSET(GC, 0, mmVM_L2_PROTECTION_FAULT_CNTL); 332 SOC15_REG_OFFSET(GC, 0, mmVM_L2_PROTECTION_FAULT_CNTL);
363 333
364 hub->get_invalidate_req = gfxhub_v1_0_get_invalidate_req;
365 hub->get_vm_protection_bits = gfxhub_v1_0_get_vm_protection_bits;
366
367 return 0; 334 return 0;
368} 335}
369 336
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index d9586601a437..631aef38126d 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -543,7 +543,8 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
543 WREG32(mmVM_CONTEXT1_CNTL, 543 WREG32(mmVM_CONTEXT1_CNTL,
544 VM_CONTEXT1_CNTL__ENABLE_CONTEXT_MASK | 544 VM_CONTEXT1_CNTL__ENABLE_CONTEXT_MASK |
545 (1UL << VM_CONTEXT1_CNTL__PAGE_TABLE_DEPTH__SHIFT) | 545 (1UL << VM_CONTEXT1_CNTL__PAGE_TABLE_DEPTH__SHIFT) |
546 ((amdgpu_vm_block_size - 9) << VM_CONTEXT1_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT)); 546 ((adev->vm_manager.block_size - 9)
547 << VM_CONTEXT1_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT));
547 if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) 548 if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
548 gmc_v6_0_set_fault_enable_default(adev, false); 549 gmc_v6_0_set_fault_enable_default(adev, false);
549 else 550 else
@@ -848,7 +849,8 @@ static int gmc_v6_0_sw_init(void *handle)
848 if (r) 849 if (r)
849 return r; 850 return r;
850 851
851 adev->vm_manager.max_pfn = amdgpu_vm_size << 18; 852 amdgpu_vm_adjust_size(adev, 64);
853 adev->vm_manager.max_pfn = adev->vm_manager.vm_size << 18;
852 854
853 adev->mc.mc_mask = 0xffffffffffULL; 855 adev->mc.mc_mask = 0xffffffffffULL;
854 856
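
gmc_v6/v7/v8 now size the VM through amdgpu_vm_adjust_size() (its body is not part of this diff) and read the result from adev->vm_manager.vm_size instead of the module-global amdgpu_vm_size. The << 18 that follows converts gigabytes to a page-frame count; a sketch of the unit math:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* 64 is the default GB value the callers pass to amdgpu_vm_adjust_size(). */
        uint64_t vm_size_gb = 64;

        /* 1 GiB = 2^30 bytes = 2^18 pages of 4 KiB, hence the << 18. */
        uint64_t max_pfn = vm_size_gb << 18;

        printf("max_pfn = %llu pages = %llu GiB\n",
               (unsigned long long)max_pfn,
               (unsigned long long)(max_pfn * 4096 >> 30));
        return 0;
}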
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 0c0a6015cca5..92abe12d92bb 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -37,6 +37,8 @@
37#include "oss/oss_2_0_d.h" 37#include "oss/oss_2_0_d.h"
38#include "oss/oss_2_0_sh_mask.h" 38#include "oss/oss_2_0_sh_mask.h"
39 39
40#include "amdgpu_atombios.h"
41
40static void gmc_v7_0_set_gart_funcs(struct amdgpu_device *adev); 42static void gmc_v7_0_set_gart_funcs(struct amdgpu_device *adev);
41static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev); 43static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev);
42static int gmc_v7_0_wait_for_idle(void *handle); 44static int gmc_v7_0_wait_for_idle(void *handle);
@@ -325,48 +327,51 @@ static void gmc_v7_0_mc_program(struct amdgpu_device *adev)
325 */ 327 */
326static int gmc_v7_0_mc_init(struct amdgpu_device *adev) 328static int gmc_v7_0_mc_init(struct amdgpu_device *adev)
327{ 329{
328 u32 tmp; 330 adev->mc.vram_width = amdgpu_atombios_get_vram_width(adev);
329 int chansize, numchan; 331 if (!adev->mc.vram_width) {
330 332 u32 tmp;
331 /* Get VRAM information */ 333 int chansize, numchan;
332 tmp = RREG32(mmMC_ARB_RAMCFG); 334
333 if (REG_GET_FIELD(tmp, MC_ARB_RAMCFG, CHANSIZE)) { 335 /* Get VRAM information */
334 chansize = 64; 336 tmp = RREG32(mmMC_ARB_RAMCFG);
335 } else { 337 if (REG_GET_FIELD(tmp, MC_ARB_RAMCFG, CHANSIZE)) {
336 chansize = 32; 338 chansize = 64;
337 } 339 } else {
338 tmp = RREG32(mmMC_SHARED_CHMAP); 340 chansize = 32;
339 switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) { 341 }
340 case 0: 342 tmp = RREG32(mmMC_SHARED_CHMAP);
341 default: 343 switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) {
342 numchan = 1; 344 case 0:
343 break; 345 default:
344 case 1: 346 numchan = 1;
345 numchan = 2; 347 break;
346 break; 348 case 1:
347 case 2: 349 numchan = 2;
348 numchan = 4; 350 break;
349 break; 351 case 2:
350 case 3: 352 numchan = 4;
351 numchan = 8; 353 break;
352 break; 354 case 3:
353 case 4: 355 numchan = 8;
354 numchan = 3; 356 break;
355 break; 357 case 4:
356 case 5: 358 numchan = 3;
357 numchan = 6; 359 break;
358 break; 360 case 5:
359 case 6: 361 numchan = 6;
360 numchan = 10; 362 break;
361 break; 363 case 6:
362 case 7: 364 numchan = 10;
363 numchan = 12; 365 break;
364 break; 366 case 7:
365 case 8: 367 numchan = 12;
366 numchan = 16; 368 break;
367 break; 369 case 8:
370 numchan = 16;
371 break;
372 }
373 adev->mc.vram_width = numchan * chansize;
368 } 374 }
369 adev->mc.vram_width = numchan * chansize;
370 /* Could aper size report 0 ? */ 375 /* Could aper size report 0 ? */
371 adev->mc.aper_base = pci_resource_start(adev->pdev, 0); 376 adev->mc.aper_base = pci_resource_start(adev->pdev, 0);
372 adev->mc.aper_size = pci_resource_len(adev->pdev, 0); 377 adev->mc.aper_size = pci_resource_len(adev->pdev, 0);
@@ -639,7 +644,7 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
639 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1); 644 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
640 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH, 1); 645 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH, 1);
641 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_BLOCK_SIZE, 646 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_BLOCK_SIZE,
642 amdgpu_vm_block_size - 9); 647 adev->vm_manager.block_size - 9);
643 WREG32(mmVM_CONTEXT1_CNTL, tmp); 648 WREG32(mmVM_CONTEXT1_CNTL, tmp);
644 if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) 649 if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
645 gmc_v7_0_set_fault_enable_default(adev, false); 650 gmc_v7_0_set_fault_enable_default(adev, false);
@@ -998,7 +1003,8 @@ static int gmc_v7_0_sw_init(void *handle)
998 * Currently set to 4GB ((1 << 20) 4k pages). 1003 * Currently set to 4GB ((1 << 20) 4k pages).
999 * Max GPUVM size for cayman and SI is 40 bits. 1004 * Max GPUVM size for cayman and SI is 40 bits.
1000 */ 1005 */
1001 adev->vm_manager.max_pfn = amdgpu_vm_size << 18; 1006 amdgpu_vm_adjust_size(adev, 64);
1007 adev->vm_manager.max_pfn = adev->vm_manager.vm_size << 18;
1002 1008
1003 /* Set the internal MC address mask 1009 /* Set the internal MC address mask
1004 * This is the max address of the GPU's 1010 * This is the max address of the GPU's
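
gmc_v7_0_mc_init() (and the matching gmc_v8_0 hunk below) now asks the VBIOS for the VRAM width first and only decodes MC_SHARED_CHMAP when that query returns 0. A sketch of the fallback structure, with the NOOFCHAN switch condensed into a lookup table and illustrative register values:

#include <stdint.h>
#include <stdio.h>

/* NOOFCHAN -> channel count, condensed from the switch in gmc_v7/v8_0_mc_init(). */
static const int numchan_map[] = { 1, 2, 4, 8, 3, 6, 10, 12, 16 };

/* Stand-in for amdgpu_atombios_get_vram_width(); returns 0 when the
 * VBIOS tables do not carry the information. */
static uint32_t atombios_get_vram_width(void)
{
        return 0;
}

int main(void)
{
        uint32_t vram_width = atombios_get_vram_width();

        if (!vram_width) {
                /* Fallback: decode the MC registers as before. Values illustrative. */
                int chansize = 64;     /* MC_ARB_RAMCFG.CHANSIZE set -> 64-bit channels */
                unsigned noofchan = 3; /* MC_SHARED_CHMAP.NOOFCHAN */
                int numchan = noofchan <= 8 ? numchan_map[noofchan] : 1;

                vram_width = numchan * chansize;
        }
        printf("vram_width = %u bits\n", vram_width); /* 512 */
        return 0;
}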
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index d19d1c5e2847..f2ccefc66fd4 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -38,6 +38,8 @@
38#include "vid.h" 38#include "vid.h"
39#include "vi.h" 39#include "vi.h"
40 40
41#include "amdgpu_atombios.h"
42
41 43
42static void gmc_v8_0_set_gart_funcs(struct amdgpu_device *adev); 44static void gmc_v8_0_set_gart_funcs(struct amdgpu_device *adev);
43static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev); 45static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev);
@@ -487,48 +489,51 @@ static void gmc_v8_0_mc_program(struct amdgpu_device *adev)
487 */ 489 */
488static int gmc_v8_0_mc_init(struct amdgpu_device *adev) 490static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
489{ 491{
490 u32 tmp; 492 adev->mc.vram_width = amdgpu_atombios_get_vram_width(adev);
491 int chansize, numchan; 493 if (!adev->mc.vram_width) {
492 494 u32 tmp;
493 /* Get VRAM information */ 495 int chansize, numchan;
494 tmp = RREG32(mmMC_ARB_RAMCFG); 496
495 if (REG_GET_FIELD(tmp, MC_ARB_RAMCFG, CHANSIZE)) { 497 /* Get VRAM information */
496 chansize = 64; 498 tmp = RREG32(mmMC_ARB_RAMCFG);
497 } else { 499 if (REG_GET_FIELD(tmp, MC_ARB_RAMCFG, CHANSIZE)) {
498 chansize = 32; 500 chansize = 64;
499 } 501 } else {
500 tmp = RREG32(mmMC_SHARED_CHMAP); 502 chansize = 32;
501 switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) { 503 }
502 case 0: 504 tmp = RREG32(mmMC_SHARED_CHMAP);
503 default: 505 switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) {
504 numchan = 1; 506 case 0:
505 break; 507 default:
506 case 1: 508 numchan = 1;
507 numchan = 2; 509 break;
508 break; 510 case 1:
509 case 2: 511 numchan = 2;
510 numchan = 4; 512 break;
511 break; 513 case 2:
512 case 3: 514 numchan = 4;
513 numchan = 8; 515 break;
514 break; 516 case 3:
515 case 4: 517 numchan = 8;
516 numchan = 3; 518 break;
517 break; 519 case 4:
518 case 5: 520 numchan = 3;
519 numchan = 6; 521 break;
520 break; 522 case 5:
521 case 6: 523 numchan = 6;
522 numchan = 10; 524 break;
523 break; 525 case 6:
524 case 7: 526 numchan = 10;
525 numchan = 12; 527 break;
526 break; 528 case 7:
527 case 8: 529 numchan = 12;
528 numchan = 16; 530 break;
529 break; 531 case 8:
532 numchan = 16;
533 break;
534 }
535 adev->mc.vram_width = numchan * chansize;
530 } 536 }
531 adev->mc.vram_width = numchan * chansize;
532 /* Could aper size report 0 ? */ 537 /* Could aper size report 0 ? */
533 adev->mc.aper_base = pci_resource_start(adev->pdev, 0); 538 adev->mc.aper_base = pci_resource_start(adev->pdev, 0);
534 adev->mc.aper_size = pci_resource_len(adev->pdev, 0); 539 adev->mc.aper_size = pci_resource_len(adev->pdev, 0);
@@ -848,7 +853,7 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
848 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1); 853 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
849 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1); 854 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
850 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_BLOCK_SIZE, 855 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_BLOCK_SIZE,
851 amdgpu_vm_block_size - 9); 856 adev->vm_manager.block_size - 9);
852 WREG32(mmVM_CONTEXT1_CNTL, tmp); 857 WREG32(mmVM_CONTEXT1_CNTL, tmp);
853 if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) 858 if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
854 gmc_v8_0_set_fault_enable_default(adev, false); 859 gmc_v8_0_set_fault_enable_default(adev, false);
@@ -1082,7 +1087,8 @@ static int gmc_v8_0_sw_init(void *handle)
1082 * Currently set to 4GB ((1 << 20) 4k pages). 1087 * Currently set to 4GB ((1 << 20) 4k pages).
1083 * Max GPUVM size for cayman and SI is 40 bits. 1088 * Max GPUVM size for cayman and SI is 40 bits.
1084 */ 1089 */
1085 adev->vm_manager.max_pfn = amdgpu_vm_size << 18; 1090 amdgpu_vm_adjust_size(adev, 64);
1091 adev->vm_manager.max_pfn = adev->vm_manager.vm_size << 18;
1086 1092
1087 /* Set the internal MC address mask 1093 /* Set the internal MC address mask
1088 * This is the max address of the GPU's 1094 * This is the max address of the GPU's
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index df69aae99df4..3b045e0b114e 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -75,11 +75,18 @@ static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
75 struct amdgpu_vmhub *hub; 75 struct amdgpu_vmhub *hub;
76 u32 tmp, reg, bits, i; 76 u32 tmp, reg, bits, i;
77 77
78 bits = VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
79 VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
80 VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
81 VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
82 VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
83 VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
84 VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;
85
78 switch (state) { 86 switch (state) {
79 case AMDGPU_IRQ_STATE_DISABLE: 87 case AMDGPU_IRQ_STATE_DISABLE:
80 /* MM HUB */ 88 /* MM HUB */
81 hub = &adev->vmhub[AMDGPU_MMHUB]; 89 hub = &adev->vmhub[AMDGPU_MMHUB];
82 bits = hub->get_vm_protection_bits();
83 for (i = 0; i < 16; i++) { 90 for (i = 0; i < 16; i++) {
84 reg = hub->vm_context0_cntl + i; 91 reg = hub->vm_context0_cntl + i;
85 tmp = RREG32(reg); 92 tmp = RREG32(reg);
@@ -89,7 +96,6 @@ static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
89 96
90 /* GFX HUB */ 97 /* GFX HUB */
91 hub = &adev->vmhub[AMDGPU_GFXHUB]; 98 hub = &adev->vmhub[AMDGPU_GFXHUB];
92 bits = hub->get_vm_protection_bits();
93 for (i = 0; i < 16; i++) { 99 for (i = 0; i < 16; i++) {
94 reg = hub->vm_context0_cntl + i; 100 reg = hub->vm_context0_cntl + i;
95 tmp = RREG32(reg); 101 tmp = RREG32(reg);
@@ -100,7 +106,6 @@ static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
100 case AMDGPU_IRQ_STATE_ENABLE: 106 case AMDGPU_IRQ_STATE_ENABLE:
101 /* MM HUB */ 107 /* MM HUB */
102 hub = &adev->vmhub[AMDGPU_MMHUB]; 108 hub = &adev->vmhub[AMDGPU_MMHUB];
103 bits = hub->get_vm_protection_bits();
104 for (i = 0; i < 16; i++) { 109 for (i = 0; i < 16; i++) {
105 reg = hub->vm_context0_cntl + i; 110 reg = hub->vm_context0_cntl + i;
106 tmp = RREG32(reg); 111 tmp = RREG32(reg);
@@ -110,7 +115,6 @@ static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
110 115
111 /* GFX HUB */ 116 /* GFX HUB */
112 hub = &adev->vmhub[AMDGPU_GFXHUB]; 117 hub = &adev->vmhub[AMDGPU_GFXHUB];
113 bits = hub->get_vm_protection_bits();
114 for (i = 0; i < 16; i++) { 118 for (i = 0; i < 16; i++) {
115 reg = hub->vm_context0_cntl + i; 119 reg = hub->vm_context0_cntl + i;
116 tmp = RREG32(reg); 120 tmp = RREG32(reg);
@@ -129,8 +133,7 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
129 struct amdgpu_irq_src *source, 133 struct amdgpu_irq_src *source,
130 struct amdgpu_iv_entry *entry) 134 struct amdgpu_iv_entry *entry)
131{ 135{
132 struct amdgpu_vmhub *gfxhub = &adev->vmhub[AMDGPU_GFXHUB]; 136 struct amdgpu_vmhub *hub = &adev->vmhub[entry->vm_id_src];
133 struct amdgpu_vmhub *mmhub = &adev->vmhub[AMDGPU_MMHUB];
134 uint32_t status = 0; 137 uint32_t status = 0;
135 u64 addr; 138 u64 addr;
136 139
@@ -138,13 +141,8 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
138 addr |= ((u64)entry->src_data[1] & 0xf) << 44; 141 addr |= ((u64)entry->src_data[1] & 0xf) << 44;
139 142
140 if (!amdgpu_sriov_vf(adev)) { 143 if (!amdgpu_sriov_vf(adev)) {
141 if (entry->vm_id_src) { 144 status = RREG32(hub->vm_l2_pro_fault_status);
142 status = RREG32(mmhub->vm_l2_pro_fault_status); 145 WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
143 WREG32_P(mmhub->vm_l2_pro_fault_cntl, 1, ~1);
144 } else {
145 status = RREG32(gfxhub->vm_l2_pro_fault_status);
146 WREG32_P(gfxhub->vm_l2_pro_fault_cntl, 1, ~1);
147 }
148 } 146 }
149 147
150 if (printk_ratelimit()) { 148 if (printk_ratelimit()) {
@@ -175,6 +173,25 @@ static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev)
175 adev->mc.vm_fault.funcs = &gmc_v9_0_irq_funcs; 173 adev->mc.vm_fault.funcs = &gmc_v9_0_irq_funcs;
176} 174}
177 175
176static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vm_id)
177{
178 u32 req = 0;
179
180 /* invalidate using legacy mode on vm_id */
181 req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
182 PER_VMID_INVALIDATE_REQ, 1 << vm_id);
183 req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, 0);
184 req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
185 req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
186 req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
187 req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
188 req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
189 req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
190 CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0);
191
192 return req;
193}
194
178/* 195/*
179 * GART 196 * GART
180 * VMID 0 is the physical GPU addresses as used by the kernel. 197 * VMID 0 is the physical GPU addresses as used by the kernel.
@@ -204,7 +221,7 @@ static void gmc_v9_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
204 221
205 for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) { 222 for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
206 struct amdgpu_vmhub *hub = &adev->vmhub[i]; 223 struct amdgpu_vmhub *hub = &adev->vmhub[i];
207 u32 tmp = hub->get_invalidate_req(vmid); 224 u32 tmp = gmc_v9_0_get_invalidate_req(vmid);
208 225
209 WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp); 226 WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp);
210 227
@@ -337,30 +354,23 @@ static uint64_t gmc_v9_0_get_vm_pte_flags(struct amdgpu_device *adev,
337 return pte_flag; 354 return pte_flag;
338} 355}
339 356
340static const struct amdgpu_gart_funcs gmc_v9_0_gart_funcs = {
341 .flush_gpu_tlb = gmc_v9_0_gart_flush_gpu_tlb,
342 .set_pte_pde = gmc_v9_0_gart_set_pte_pde,
343 .get_vm_pte_flags = gmc_v9_0_get_vm_pte_flags
344};
345
346static void gmc_v9_0_set_gart_funcs(struct amdgpu_device *adev)
347{
348 if (adev->gart.gart_funcs == NULL)
349 adev->gart.gart_funcs = &gmc_v9_0_gart_funcs;
350}
351
352static u64 gmc_v9_0_adjust_mc_addr(struct amdgpu_device *adev, u64 mc_addr) 357static u64 gmc_v9_0_adjust_mc_addr(struct amdgpu_device *adev, u64 mc_addr)
353{ 358{
354 return adev->vm_manager.vram_base_offset + mc_addr - adev->mc.vram_start; 359 return adev->vm_manager.vram_base_offset + mc_addr - adev->mc.vram_start;
355} 360}
356 361
357static const struct amdgpu_mc_funcs gmc_v9_0_mc_funcs = { 362static const struct amdgpu_gart_funcs gmc_v9_0_gart_funcs = {
363 .flush_gpu_tlb = gmc_v9_0_gart_flush_gpu_tlb,
364 .set_pte_pde = gmc_v9_0_gart_set_pte_pde,
365 .get_vm_pte_flags = gmc_v9_0_get_vm_pte_flags,
358 .adjust_mc_addr = gmc_v9_0_adjust_mc_addr, 366 .adjust_mc_addr = gmc_v9_0_adjust_mc_addr,
367 .get_invalidate_req = gmc_v9_0_get_invalidate_req,
359}; 368};
360 369
361static void gmc_v9_0_set_mc_funcs(struct amdgpu_device *adev) 370static void gmc_v9_0_set_gart_funcs(struct amdgpu_device *adev)
362{ 371{
363 adev->mc.mc_funcs = &gmc_v9_0_mc_funcs; 372 if (adev->gart.gart_funcs == NULL)
373 adev->gart.gart_funcs = &gmc_v9_0_gart_funcs;
364} 374}
365 375
366static int gmc_v9_0_early_init(void *handle) 376static int gmc_v9_0_early_init(void *handle)
@@ -368,7 +378,6 @@ static int gmc_v9_0_early_init(void *handle)
368 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 378 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
369 379
370 gmc_v9_0_set_gart_funcs(adev); 380 gmc_v9_0_set_gart_funcs(adev);
371 gmc_v9_0_set_mc_funcs(adev);
372 gmc_v9_0_set_irq_funcs(adev); 381 gmc_v9_0_set_irq_funcs(adev);
373 382
374 return 0; 383 return 0;
@@ -511,7 +520,12 @@ static int gmc_v9_0_vm_init(struct amdgpu_device *adev)
511 * amdkfd will use VMIDs 8-15 520 * amdkfd will use VMIDs 8-15
512 */ 521 */
513 adev->vm_manager.num_ids = AMDGPU_NUM_OF_VMIDS; 522 adev->vm_manager.num_ids = AMDGPU_NUM_OF_VMIDS;
514 adev->vm_manager.num_level = 3; 523
524 /* TODO: fix num_level for APU when updating vm size and block size */
525 if (adev->flags & AMD_IS_APU)
526 adev->vm_manager.num_level = 1;
527 else
528 adev->vm_manager.num_level = 3;
515 amdgpu_vm_manager_init(adev); 529 amdgpu_vm_manager_init(adev);
516 530
517 /* base offset of vram pages */ 531 /* base offset of vram pages */
@@ -543,9 +557,20 @@ static int gmc_v9_0_sw_init(void *handle)
543 557
544 if (adev->flags & AMD_IS_APU) { 558 if (adev->flags & AMD_IS_APU) {
545 adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN; 559 adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
560 amdgpu_vm_adjust_size(adev, 64);
546 } else { 561 } else {
547 /* XXX Don't know how to get VRAM type yet. */ 562 /* XXX Don't know how to get VRAM type yet. */
548 adev->mc.vram_type = AMDGPU_VRAM_TYPE_HBM; 563 adev->mc.vram_type = AMDGPU_VRAM_TYPE_HBM;
564 /*
565 * To fulfill 4-level page support,
566 * vm size is 256TB (48bit), maximum size of Vega10,
567 * block size 512 (9bit)
568 */
569 adev->vm_manager.vm_size = 1U << 18;
570 adev->vm_manager.block_size = 9;
571 DRM_INFO("vm size is %llu GB, block size is %u-bit\n",
572 adev->vm_manager.vm_size,
573 adev->vm_manager.block_size);
549 } 574 }
550 575
551 /* This interrupt is VMC page fault.*/ 576 /* This interrupt is VMC page fault.*/
@@ -557,14 +582,7 @@ static int gmc_v9_0_sw_init(void *handle)
557 if (r) 582 if (r)
558 return r; 583 return r;
559 584
560 /* Because of four level VMPTs, vm size is at least 512GB. 585 adev->vm_manager.max_pfn = adev->vm_manager.vm_size << 18;
561 * The maximum size is 256TB (48bit).
562 */
563 if (amdgpu_vm_size < 512) {
564 DRM_WARN("VM size is at least 512GB!\n");
565 amdgpu_vm_size = 512;
566 }
567 adev->vm_manager.max_pfn = (uint64_t)amdgpu_vm_size << 18;
568 586
569 /* Set the internal MC address mask 587 /* Set the internal MC address mask
570 * This is the max address of the GPU's 588 * This is the max address of the GPU's
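
Since gfxhub_v1_0 and mmhub_v1_0 built byte-identical invalidate requests, the patch hoists one copy into gmc_v9_0 and publishes it through gart_funcs->get_invalidate_req. A sketch of the request word it assembles, with illustrative bit positions (the real ones come from the generated register headers):

#include <stdint.h>
#include <stdio.h>

/* Illustrative field positions for VM_INVALIDATE_ENG0_REQ. */
#define PER_VMID_INVALIDATE_REQ_SHIFT 0
#define FLUSH_TYPE_SHIFT             16
#define INVALIDATE_L2_PTES_SHIFT     19
#define INVALIDATE_L2_PDE0_SHIFT     20
#define INVALIDATE_L2_PDE1_SHIFT     21
#define INVALIDATE_L2_PDE2_SHIFT     22
#define INVALIDATE_L1_PTES_SHIFT     23

/* Legacy-mode invalidate for one VMID, mirroring what the now-shared
 * gmc_v9_0_get_invalidate_req() builds for both hubs. */
static uint32_t build_invalidate_req(unsigned int vm_id)
{
        uint32_t req = 0;

        req |= (1u << vm_id) << PER_VMID_INVALIDATE_REQ_SHIFT;
        req |= 0u << FLUSH_TYPE_SHIFT; /* legacy flush */
        req |= 1u << INVALIDATE_L2_PTES_SHIFT;
        req |= 1u << INVALIDATE_L2_PDE0_SHIFT;
        req |= 1u << INVALIDATE_L2_PDE1_SHIFT;
        req |= 1u << INVALIDATE_L2_PDE2_SHIFT;
        req |= 1u << INVALIDATE_L1_PTES_SHIFT;
        return req;
}

int main(void)
{
        printf("req(vmid=3) = 0x%08x\n", (unsigned)build_invalidate_req(3));
        return 0;
}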
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
index 266a0f47a908..62684510ddcd 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
@@ -242,7 +242,7 @@ int mmhub_v1_0_gart_enable(struct amdgpu_device *adev)
242 EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1); 242 EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
243 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, 243 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
244 PAGE_TABLE_BLOCK_SIZE, 244 PAGE_TABLE_BLOCK_SIZE,
245 amdgpu_vm_block_size - 9); 245 adev->vm_manager.block_size - 9);
246 WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_CONTEXT1_CNTL) + i, tmp); 246 WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_CONTEXT1_CNTL) + i, tmp);
247 WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32) + i*2, 0); 247 WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32) + i*2, 0);
248 WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32) + i*2, 0); 248 WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32) + i*2, 0);
@@ -317,36 +317,6 @@ void mmhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev, bool value)
317 WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL), tmp); 317 WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL), tmp);
318} 318}
319 319
320static uint32_t mmhub_v1_0_get_invalidate_req(unsigned int vm_id)
321{
322 u32 req = 0;
323
324 /* invalidate using legacy mode on vm_id*/
325 req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
326 PER_VMID_INVALIDATE_REQ, 1 << vm_id);
327 req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, 0);
328 req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
329 req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
330 req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
331 req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
332 req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
333 req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
334 CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0);
335
336 return req;
337}
338
339static uint32_t mmhub_v1_0_get_vm_protection_bits(void)
340{
341 return (VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
342 VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
343 VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
344 VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
345 VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
346 VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
347 VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK);
348}
349
350static int mmhub_v1_0_early_init(void *handle) 320static int mmhub_v1_0_early_init(void *handle)
351{ 321{
352 return 0; 322 return 0;
@@ -379,9 +349,6 @@ static int mmhub_v1_0_sw_init(void *handle)
379 hub->vm_l2_pro_fault_cntl = 349 hub->vm_l2_pro_fault_cntl =
380 SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL); 350 SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL);
381 351
382 hub->get_invalidate_req = mmhub_v1_0_get_invalidate_req;
383 hub->get_vm_protection_bits = mmhub_v1_0_get_vm_protection_bits;
384
385 return 0; 352 return 0;
386} 353}
387 354
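With the per-hub get_invalidate_req/get_vm_protection_bits callbacks deleted here, the invalidate request is now built once through gart_funcs (see the gmc_v9_0.c hunks above). The sketch below models how such a request word is packed from per-VMID fields; the SET_FIELD macro and the bit offsets are illustrative assumptions, not the real VM_INVALIDATE_ENG0_REQ layout.

/*
 * Sketch of packing a legacy per-VMID invalidate request word.
 * The shifts and masks below are illustrative assumptions only.
 */
#include <stdint.h>
#include <stdio.h>

#define SET_FIELD(val, shift, mask, v) \
	(((val) & ~((uint32_t)(mask) << (shift))) | \
	 (((uint32_t)(v) & (mask)) << (shift)))

static uint32_t build_invalidate_req(unsigned vm_id)
{
	uint32_t req = 0;

	req = SET_FIELD(req, 0, 0xffff, 1u << vm_id); /* PER_VMID_INVALIDATE_REQ */
	req = SET_FIELD(req, 16, 0x7, 0);             /* FLUSH_TYPE: legacy */
	req = SET_FIELD(req, 19, 0x1, 1);             /* INVALIDATE_L2_PTES */
	req = SET_FIELD(req, 20, 0x1, 1);             /* INVALIDATE_L2_PDE0 */
	req = SET_FIELD(req, 23, 0x1, 1);             /* INVALIDATE_L1_PTES */
	return req;
}

int main(void)
{
	printf("req for vmid 3: 0x%08x\n", build_invalidate_req(3));
	return 0;
}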
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
index cfd5e54777bb..1493301b6a94 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
@@ -28,6 +28,7 @@
28#include "vega10/GC/gc_9_0_offset.h" 28#include "vega10/GC/gc_9_0_offset.h"
29#include "vega10/GC/gc_9_0_sh_mask.h" 29#include "vega10/GC/gc_9_0_sh_mask.h"
30#include "soc15.h" 30#include "soc15.h"
31#include "vega10_ih.h"
31#include "soc15_common.h" 32#include "soc15_common.h"
32#include "mxgpu_ai.h" 33#include "mxgpu_ai.h"
33 34
@@ -133,7 +134,7 @@ static int xgpu_ai_poll_ack(struct amdgpu_device *adev)
133 return r; 134 return r;
134} 135}
135 136
136static int xgpu_vi_poll_msg(struct amdgpu_device *adev, enum idh_event event) 137static int xgpu_ai_poll_msg(struct amdgpu_device *adev, enum idh_event event)
137{ 138{
138 int r = 0, timeout = AI_MAILBOX_TIMEDOUT; 139 int r = 0, timeout = AI_MAILBOX_TIMEDOUT;
139 140
@@ -172,7 +173,7 @@ static int xgpu_ai_send_access_requests(struct amdgpu_device *adev,
172 if (req == IDH_REQ_GPU_INIT_ACCESS || 173 if (req == IDH_REQ_GPU_INIT_ACCESS ||
173 req == IDH_REQ_GPU_FINI_ACCESS || 174 req == IDH_REQ_GPU_FINI_ACCESS ||
174 req == IDH_REQ_GPU_RESET_ACCESS) { 175 req == IDH_REQ_GPU_RESET_ACCESS) {
175 r = xgpu_vi_poll_msg(adev, IDH_READY_TO_ACCESS_GPU); 176 r = xgpu_ai_poll_msg(adev, IDH_READY_TO_ACCESS_GPU);
176 if (r) 177 if (r)
177 return r; 178 return r;
178 } 179 }
@@ -180,6 +181,11 @@ static int xgpu_ai_send_access_requests(struct amdgpu_device *adev,
180 return 0; 181 return 0;
181} 182}
182 183
184static int xgpu_ai_request_reset(struct amdgpu_device *adev)
185{
186 return xgpu_ai_send_access_requests(adev, IDH_REQ_GPU_RESET_ACCESS);
187}
188
183static int xgpu_ai_request_full_gpu_access(struct amdgpu_device *adev, 189static int xgpu_ai_request_full_gpu_access(struct amdgpu_device *adev,
184 bool init) 190 bool init)
185{ 191{
@@ -201,7 +207,134 @@ static int xgpu_ai_release_full_gpu_access(struct amdgpu_device *adev,
201 return r; 207 return r;
202} 208}
203 209
210static int xgpu_ai_mailbox_ack_irq(struct amdgpu_device *adev,
211 struct amdgpu_irq_src *source,
212 struct amdgpu_iv_entry *entry)
213{
214	DRM_DEBUG("ack interrupt received, nothing to do.\n");
215 return 0;
216}
217
218static int xgpu_ai_set_mailbox_ack_irq(struct amdgpu_device *adev,
219 struct amdgpu_irq_src *source,
220 unsigned type,
221 enum amdgpu_interrupt_state state)
222{
223 u32 tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL));
224
225 tmp = REG_SET_FIELD(tmp, BIF_BX_PF0_MAILBOX_INT_CNTL, ACK_INT_EN,
226 (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
227 WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL), tmp);
228
229 return 0;
230}
231
232static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
233{
234 struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work);
235 struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
236
237	/* wait until RCV_MSG becomes 3 */
238 if (xgpu_ai_poll_msg(adev, IDH_FLR_NOTIFICATION_CMPL)) {
239		pr_err("failed to receive FLR_CMPL\n");
240 return;
241 }
242
243 /* Trigger recovery due to world switch failure */
244 amdgpu_sriov_gpu_reset(adev, false);
245}
246
247static int xgpu_ai_set_mailbox_rcv_irq(struct amdgpu_device *adev,
248 struct amdgpu_irq_src *src,
249 unsigned type,
250 enum amdgpu_interrupt_state state)
251{
252 u32 tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL));
253
254 tmp = REG_SET_FIELD(tmp, BIF_BX_PF0_MAILBOX_INT_CNTL, VALID_INT_EN,
255 (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
256 WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL), tmp);
257
258 return 0;
259}
260
261static int xgpu_ai_mailbox_rcv_irq(struct amdgpu_device *adev,
262 struct amdgpu_irq_src *source,
263 struct amdgpu_iv_entry *entry)
264{
265 int r;
266
267 /* see what event we get */
268 r = xgpu_ai_mailbox_rcv_msg(adev, IDH_FLR_NOTIFICATION);
269
270 /* only handle FLR_NOTIFY now */
271 if (!r)
272 schedule_work(&adev->virt.flr_work);
273
274 return 0;
275}
276
277static const struct amdgpu_irq_src_funcs xgpu_ai_mailbox_ack_irq_funcs = {
278 .set = xgpu_ai_set_mailbox_ack_irq,
279 .process = xgpu_ai_mailbox_ack_irq,
280};
281
282static const struct amdgpu_irq_src_funcs xgpu_ai_mailbox_rcv_irq_funcs = {
283 .set = xgpu_ai_set_mailbox_rcv_irq,
284 .process = xgpu_ai_mailbox_rcv_irq,
285};
286
287void xgpu_ai_mailbox_set_irq_funcs(struct amdgpu_device *adev)
288{
289 adev->virt.ack_irq.num_types = 1;
290 adev->virt.ack_irq.funcs = &xgpu_ai_mailbox_ack_irq_funcs;
291 adev->virt.rcv_irq.num_types = 1;
292 adev->virt.rcv_irq.funcs = &xgpu_ai_mailbox_rcv_irq_funcs;
293}
294
295int xgpu_ai_mailbox_add_irq_id(struct amdgpu_device *adev)
296{
297 int r;
298
299 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 135, &adev->virt.rcv_irq);
300 if (r)
301 return r;
302
303 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 138, &adev->virt.ack_irq);
304 if (r) {
305 amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
306 return r;
307 }
308
309 return 0;
310}
311
312int xgpu_ai_mailbox_get_irq(struct amdgpu_device *adev)
313{
314 int r;
315
316 r = amdgpu_irq_get(adev, &adev->virt.rcv_irq, 0);
317 if (r)
318 return r;
319 r = amdgpu_irq_get(adev, &adev->virt.ack_irq, 0);
320 if (r) {
321 amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
322 return r;
323 }
324
325 INIT_WORK(&adev->virt.flr_work, xgpu_ai_mailbox_flr_work);
326
327 return 0;
328}
329
330void xgpu_ai_mailbox_put_irq(struct amdgpu_device *adev)
331{
332 amdgpu_irq_put(adev, &adev->virt.ack_irq, 0);
333 amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
334}
335
204const struct amdgpu_virt_ops xgpu_ai_virt_ops = { 336const struct amdgpu_virt_ops xgpu_ai_virt_ops = {
205 .req_full_gpu = xgpu_ai_request_full_gpu_access, 337 .req_full_gpu = xgpu_ai_request_full_gpu_access,
206 .rel_full_gpu = xgpu_ai_release_full_gpu_access, 338 .rel_full_gpu = xgpu_ai_release_full_gpu_access,
339 .reset_gpu = xgpu_ai_request_reset,
207}; 340};
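The FLR path added above is: the receive interrupt matches IDH_FLR_NOTIFICATION and schedules flr_work, and the worker then polls until the completion message arrives (or the 5000 ms AI_MAILBOX_TIMEDOUT expires) before triggering the SR-IOV reset. A minimal standalone sketch of that poll-with-timeout pattern, with peek_msg() and msleep() as hypothetical stand-ins for the driver's register read and kernel delay:

/*
 * Sketch of the mailbox poll pattern: spin on a "message received"
 * check until it matches or the timeout budget is spent.
 */
#include <stdbool.h>
#include <stdio.h>

#define AI_MAILBOX_TIMEDOUT 5000 /* ms, matching the new header value */

static int mailbox_msg; /* simulated mailbox register */

static bool peek_msg(int event) { return mailbox_msg == event; }
static void msleep(int ms) { (void)ms; /* stub for the sketch */ }

static int poll_msg(int event)
{
	int timeout = AI_MAILBOX_TIMEDOUT;

	while (!peek_msg(event)) {
		msleep(10);
		timeout -= 10;
		if (timeout <= 0)
			return -1; /* timeout error in the driver */
	}
	return 0;
}

int main(void)
{
	mailbox_msg = 3; /* the FLR completion message arrives */
	printf("poll result: %d\n", poll_msg(3));
	return 0;
}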
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
index bf8ab8fd4367..9aefc44d2c34 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
@@ -24,7 +24,7 @@
24#ifndef __MXGPU_AI_H__ 24#ifndef __MXGPU_AI_H__
25#define __MXGPU_AI_H__ 25#define __MXGPU_AI_H__
26 26
27#define AI_MAILBOX_TIMEDOUT 150000 27#define AI_MAILBOX_TIMEDOUT 5000
28 28
29enum idh_request { 29enum idh_request {
30 IDH_REQ_GPU_INIT_ACCESS = 1, 30 IDH_REQ_GPU_INIT_ACCESS = 1,
@@ -44,4 +44,9 @@ enum idh_event {
44 44
45extern const struct amdgpu_virt_ops xgpu_ai_virt_ops; 45extern const struct amdgpu_virt_ops xgpu_ai_virt_ops;
46 46
47void xgpu_ai_mailbox_set_irq_funcs(struct amdgpu_device *adev);
48int xgpu_ai_mailbox_add_irq_id(struct amdgpu_device *adev);
49int xgpu_ai_mailbox_get_irq(struct amdgpu_device *adev);
50void xgpu_ai_mailbox_put_irq(struct amdgpu_device *adev);
51
47#endif 52#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
index 5191c45ffdf3..c3588d1c7cb0 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
@@ -491,7 +491,7 @@ bool psp_v3_1_compare_sram_data(struct psp_context *psp,
491 491
492 ucode_size = ucode->ucode_size; 492 ucode_size = ucode->ucode_size;
493 ucode_mem = (uint32_t *)ucode->kaddr; 493 ucode_mem = (uint32_t *)ucode->kaddr;
494 while (!ucode_size) { 494 while (ucode_size) {
495 fw_sram_reg_val = RREG32(fw_sram_data_reg_offset); 495 fw_sram_reg_val = RREG32(fw_sram_data_reg_offset);
496 496
497 if (*ucode_mem != fw_sram_reg_val) 497 if (*ucode_mem != fw_sram_reg_val)
@@ -508,14 +508,10 @@ bool psp_v3_1_compare_sram_data(struct psp_context *psp,
508bool psp_v3_1_smu_reload_quirk(struct psp_context *psp) 508bool psp_v3_1_smu_reload_quirk(struct psp_context *psp)
509{ 509{
510 struct amdgpu_device *adev = psp->adev; 510 struct amdgpu_device *adev = psp->adev;
511 uint32_t reg, reg_val; 511 uint32_t reg;
512 512
513 reg_val = (smnMP1_FIRMWARE_FLAGS & 0xffffffff) | 0x03b00000; 513 reg = smnMP1_FIRMWARE_FLAGS | 0x03b00000;
514 WREG32(SOC15_REG_OFFSET(NBIO, 0, mmPCIE_INDEX2), reg_val); 514 WREG32(SOC15_REG_OFFSET(NBIO, 0, mmPCIE_INDEX2), reg);
515 reg = RREG32(SOC15_REG_OFFSET(NBIO, 0, mmPCIE_DATA2)); 515 reg = RREG32(SOC15_REG_OFFSET(NBIO, 0, mmPCIE_DATA2));
516 if ((reg & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >> 516 return (reg & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) ? true : false;
517 MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
518 return true;
519
520 return false;
521} 517}
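The one-character fix in psp_v3_1_compare_sram_data matters: with while (!ucode_size) the compare loop never executed for any non-empty firmware image, so the check trivially passed. A standalone sketch of the corrected loop, with read_sram_word() as a hypothetical stand-in for the RREG32() readback:

/*
 * Sketch of the corrected SRAM compare loop; the old condition
 * "while (!ucode_size)" skipped the body entirely.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t sram[4] = { 1, 2, 3, 4 };
static uint32_t read_sram_word(unsigned i) { return sram[i]; }

static bool compare_sram(const uint32_t *ucode, uint32_t ucode_size)
{
	unsigned i = 0;

	while (ucode_size) {            /* was: while (!ucode_size) */
		if (ucode[i] != read_sram_word(i))
			return false;
		i++;
		ucode_size -= 4;        /* one 32-bit word at a time */
	}
	return true;
}

int main(void)
{
	uint32_t fw[4] = { 1, 2, 3, 4 };
	printf("match: %d\n", compare_sram(fw, sizeof(fw)));
	return 0;
}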
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 2dd2b20d727e..21f38d882335 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -1039,6 +1039,7 @@ static void sdma_v4_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
1039static void sdma_v4_0_ring_emit_vm_flush(struct amdgpu_ring *ring, 1039static void sdma_v4_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
1040 unsigned vm_id, uint64_t pd_addr) 1040 unsigned vm_id, uint64_t pd_addr)
1041{ 1041{
1042 uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
1042 unsigned eng = ring->idx; 1043 unsigned eng = ring->idx;
1043 unsigned i; 1044 unsigned i;
1044 1045
@@ -1048,7 +1049,6 @@ static void sdma_v4_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
1048 1049
1049 for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) { 1050 for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
1050 struct amdgpu_vmhub *hub = &ring->adev->vmhub[i]; 1051 struct amdgpu_vmhub *hub = &ring->adev->vmhub[i];
1051 uint32_t req = hub->get_invalidate_req(vm_id);
1052 1052
1053 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) | 1053 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
1054 SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf)); 1054 SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
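The hunk above, like the matching UVD/VCE emitters later in this diff, hoists the invalidate request out of the hub loop: the request depends only on vm_id, not on the hub, so it is computed once via gart_funcs before iterating. A trivial sketch of the hoist, with build_req() as a hypothetical stand-in for get_invalidate_req():

#include <stdint.h>
#include <stdio.h>

#define MAX_VMHUBS 2

static uint32_t build_req(unsigned vm_id) { return 1u << vm_id; }

int main(void)
{
	unsigned vm_id = 3;
	uint32_t req = build_req(vm_id);   /* hoisted: loop-invariant */

	for (int i = 0; i < MAX_VMHUBS; ++i)
		printf("hub %d writes req 0x%08x\n", i, req);
	return 0;
}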
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index bb14a45997b5..385de8617075 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -106,6 +106,8 @@ static u32 soc15_pcie_rreg(struct amdgpu_device *adev, u32 reg)
106 106
107 if (adev->asic_type == CHIP_VEGA10) 107 if (adev->asic_type == CHIP_VEGA10)
108 nbio_pcie_id = &nbio_v6_1_pcie_index_data; 108 nbio_pcie_id = &nbio_v6_1_pcie_index_data;
109 else
110 BUG();
109 111
110 address = nbio_pcie_id->index_offset; 112 address = nbio_pcie_id->index_offset;
111 data = nbio_pcie_id->data_offset; 113 data = nbio_pcie_id->data_offset;
@@ -125,6 +127,8 @@ static void soc15_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
125 127
126 if (adev->asic_type == CHIP_VEGA10) 128 if (adev->asic_type == CHIP_VEGA10)
127 nbio_pcie_id = &nbio_v6_1_pcie_index_data; 129 nbio_pcie_id = &nbio_v6_1_pcie_index_data;
130 else
131 BUG();
128 132
129 address = nbio_pcie_id->index_offset; 133 address = nbio_pcie_id->index_offset;
130 data = nbio_pcie_id->data_offset; 134 data = nbio_pcie_id->data_offset;
@@ -493,7 +497,8 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
493 amdgpu_ip_block_add(adev, &mmhub_v1_0_ip_block); 497 amdgpu_ip_block_add(adev, &mmhub_v1_0_ip_block);
494 amdgpu_ip_block_add(adev, &gmc_v9_0_ip_block); 498 amdgpu_ip_block_add(adev, &gmc_v9_0_ip_block);
495 amdgpu_ip_block_add(adev, &vega10_ih_ip_block); 499 amdgpu_ip_block_add(adev, &vega10_ih_ip_block);
496 amdgpu_ip_block_add(adev, &psp_v3_1_ip_block); 500 if (amdgpu_fw_load_type == 2 || amdgpu_fw_load_type == -1)
501 amdgpu_ip_block_add(adev, &psp_v3_1_ip_block);
497 if (!amdgpu_sriov_vf(adev)) 502 if (!amdgpu_sriov_vf(adev))
498 amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block); 503 amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
499 if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) 504 if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
@@ -558,6 +563,7 @@ static int soc15_common_early_init(void *handle)
558 563
559 if (amdgpu_sriov_vf(adev)) { 564 if (amdgpu_sriov_vf(adev)) {
560 amdgpu_virt_init_setting(adev); 565 amdgpu_virt_init_setting(adev);
566 xgpu_ai_mailbox_set_irq_funcs(adev);
561 } 567 }
562 568
563 /* 569 /*
@@ -610,8 +616,23 @@ static int soc15_common_early_init(void *handle)
610 return 0; 616 return 0;
611} 617}
612 618
619static int soc15_common_late_init(void *handle)
620{
621 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
622
623 if (amdgpu_sriov_vf(adev))
624 xgpu_ai_mailbox_get_irq(adev);
625
626 return 0;
627}
628
613static int soc15_common_sw_init(void *handle) 629static int soc15_common_sw_init(void *handle)
614{ 630{
631 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
632
633 if (amdgpu_sriov_vf(adev))
634 xgpu_ai_mailbox_add_irq_id(adev);
635
615 return 0; 636 return 0;
616} 637}
617 638
@@ -642,6 +663,8 @@ static int soc15_common_hw_fini(void *handle)
642 663
643 /* disable the doorbell aperture */ 664 /* disable the doorbell aperture */
644 soc15_enable_doorbell_aperture(adev, false); 665 soc15_enable_doorbell_aperture(adev, false);
666 if (amdgpu_sriov_vf(adev))
667 xgpu_ai_mailbox_put_irq(adev);
645 668
646 return 0; 669 return 0;
647} 670}
@@ -855,7 +878,7 @@ static int soc15_common_set_powergating_state(void *handle,
855const struct amd_ip_funcs soc15_common_ip_funcs = { 878const struct amd_ip_funcs soc15_common_ip_funcs = {
856 .name = "soc15_common", 879 .name = "soc15_common",
857 .early_init = soc15_common_early_init, 880 .early_init = soc15_common_early_init,
858 .late_init = NULL, 881 .late_init = soc15_common_late_init,
859 .sw_init = soc15_common_sw_init, 882 .sw_init = soc15_common_sw_init,
860 .sw_fini = soc15_common_sw_fini, 883 .sw_fini = soc15_common_sw_fini,
861 .hw_init = soc15_common_hw_init, 884 .hw_init = soc15_common_hw_init,
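The SR-IOV wiring above follows the IP-block lifecycle: early_init installs the mailbox irq funcs, sw_init registers the irq source ids, the new late_init takes references, and hw_fini drops them. The sketch below models the get/put pairing with hypothetical stubs; the point is that the setup path unwinds its first get when the second fails, and teardown releases both.

#include <stdio.h>

static int irq_get(const char *name)  { printf("get %s\n", name); return 0; }
static void irq_put(const char *name) { printf("put %s\n", name); }

static int mailbox_get_irq(void)
{
	int r = irq_get("rcv");
	if (r)
		return r;
	r = irq_get("ack");
	if (r) {
		irq_put("rcv");   /* unwind the first get on failure */
		return r;
	}
	return 0;
}

static void mailbox_put_irq(void)
{
	irq_put("ack");
	irq_put("rcv");
}

int main(void)
{
	if (!mailbox_get_irq())
		mailbox_put_irq();
	return 0;
}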
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
index 9a4129d881aa..8ab0f78794a5 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
@@ -135,12 +135,9 @@ static int uvd_v4_2_sw_fini(void *handle)
135 if (r) 135 if (r)
136 return r; 136 return r;
137 137
138 r = amdgpu_uvd_sw_fini(adev); 138 return amdgpu_uvd_sw_fini(adev);
139 if (r)
140 return r;
141
142 return r;
143} 139}
140
144static void uvd_v4_2_enable_mgcg(struct amdgpu_device *adev, 141static void uvd_v4_2_enable_mgcg(struct amdgpu_device *adev,
145 bool enable); 142 bool enable);
146/** 143/**
@@ -230,11 +227,7 @@ static int uvd_v4_2_suspend(void *handle)
230 if (r) 227 if (r)
231 return r; 228 return r;
232 229
233 r = amdgpu_uvd_suspend(adev); 230 return amdgpu_uvd_suspend(adev);
234 if (r)
235 return r;
236
237 return r;
238} 231}
239 232
240static int uvd_v4_2_resume(void *handle) 233static int uvd_v4_2_resume(void *handle)
@@ -246,11 +239,7 @@ static int uvd_v4_2_resume(void *handle)
246 if (r) 239 if (r)
247 return r; 240 return r;
248 241
249 r = uvd_v4_2_hw_init(adev); 242 return uvd_v4_2_hw_init(adev);
250 if (r)
251 return r;
252
253 return r;
254} 243}
255 244
256/** 245/**
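The uvd_v4_2 cleanups above, and the uvd_v5/v6/v7 and vce_v2/v3/v4 ones that follow, all collapse the same pattern: a trailing "r = f(); if (r) return r; return r;" sequence becomes a direct return of the call. A minimal sketch, with do_fini() as a hypothetical stand-in:

#include <stdio.h>

static int do_fini(void) { return 0; }

/* before: r = do_fini(); if (r) return r; return r; */
static int sw_fini(void)
{
	return do_fini();   /* after: propagate the status directly */
}

int main(void)
{
	printf("sw_fini: %d\n", sw_fini());
	return 0;
}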
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
index e448f7d86bc0..bb6d46e168a3 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
@@ -131,11 +131,7 @@ static int uvd_v5_0_sw_fini(void *handle)
131 if (r) 131 if (r)
132 return r; 132 return r;
133 133
134 r = amdgpu_uvd_sw_fini(adev); 134 return amdgpu_uvd_sw_fini(adev);
135 if (r)
136 return r;
137
138 return r;
139} 135}
140 136
141/** 137/**
@@ -228,11 +224,7 @@ static int uvd_v5_0_suspend(void *handle)
228 return r; 224 return r;
229 uvd_v5_0_set_clockgating_state(adev, AMD_CG_STATE_GATE); 225 uvd_v5_0_set_clockgating_state(adev, AMD_CG_STATE_GATE);
230 226
231 r = amdgpu_uvd_suspend(adev); 227 return amdgpu_uvd_suspend(adev);
232 if (r)
233 return r;
234
235 return r;
236} 228}
237 229
238static int uvd_v5_0_resume(void *handle) 230static int uvd_v5_0_resume(void *handle)
@@ -244,11 +236,7 @@ static int uvd_v5_0_resume(void *handle)
244 if (r) 236 if (r)
245 return r; 237 return r;
246 238
247 r = uvd_v5_0_hw_init(adev); 239 return uvd_v5_0_hw_init(adev);
248 if (r)
249 return r;
250
251 return r;
252} 240}
253 241
254/** 242/**
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index 5679a4249bd9..31db356476f8 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -134,11 +134,7 @@ static int uvd_v6_0_sw_fini(void *handle)
134 if (r) 134 if (r)
135 return r; 135 return r;
136 136
137 r = amdgpu_uvd_sw_fini(adev); 137 return amdgpu_uvd_sw_fini(adev);
138 if (r)
139 return r;
140
141 return r;
142} 138}
143 139
144/** 140/**
@@ -230,11 +226,8 @@ static int uvd_v6_0_suspend(void *handle)
230 return r; 226 return r;
231 227
232 /* Skip this for APU for now */ 228 /* Skip this for APU for now */
233 if (!(adev->flags & AMD_IS_APU)) { 229 if (!(adev->flags & AMD_IS_APU))
234 r = amdgpu_uvd_suspend(adev); 230 r = amdgpu_uvd_suspend(adev);
235 if (r)
236 return r;
237 }
238 231
239 return r; 232 return r;
240} 233}
@@ -250,11 +243,7 @@ static int uvd_v6_0_resume(void *handle)
250 if (r) 243 if (r)
251 return r; 244 return r;
252 } 245 }
253 r = uvd_v6_0_hw_init(adev); 246 return uvd_v6_0_hw_init(adev);
254 if (r)
255 return r;
256
257 return r;
258} 247}
259 248
260/** 249/**
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
index 13f52e0af9b8..9bcf01469282 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -438,11 +438,7 @@ static int uvd_v7_0_sw_fini(void *handle)
438 for (i = 0; i < adev->uvd.num_enc_rings; ++i) 438 for (i = 0; i < adev->uvd.num_enc_rings; ++i)
439 amdgpu_ring_fini(&adev->uvd.ring_enc[i]); 439 amdgpu_ring_fini(&adev->uvd.ring_enc[i]);
440 440
441 r = amdgpu_uvd_sw_fini(adev); 441 return amdgpu_uvd_sw_fini(adev);
442 if (r)
443 return r;
444
445 return r;
446} 442}
447 443
448/** 444/**
@@ -547,11 +543,8 @@ static int uvd_v7_0_suspend(void *handle)
547 return r; 543 return r;
548 544
549 /* Skip this for APU for now */ 545 /* Skip this for APU for now */
550 if (!(adev->flags & AMD_IS_APU)) { 546 if (!(adev->flags & AMD_IS_APU))
551 r = amdgpu_uvd_suspend(adev); 547 r = amdgpu_uvd_suspend(adev);
552 if (r)
553 return r;
554 }
555 548
556 return r; 549 return r;
557} 550}
@@ -567,11 +560,7 @@ static int uvd_v7_0_resume(void *handle)
567 if (r) 560 if (r)
568 return r; 561 return r;
569 } 562 }
570 r = uvd_v7_0_hw_init(adev); 563 return uvd_v7_0_hw_init(adev);
571 if (r)
572 return r;
573
574 return r;
575} 564}
576 565
577/** 566/**
@@ -1045,6 +1034,7 @@ static void uvd_v7_0_vm_reg_wait(struct amdgpu_ring *ring,
1045static void uvd_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring, 1034static void uvd_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
1046 unsigned vm_id, uint64_t pd_addr) 1035 unsigned vm_id, uint64_t pd_addr)
1047{ 1036{
1037 uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
1048 uint32_t data0, data1, mask; 1038 uint32_t data0, data1, mask;
1049 unsigned eng = ring->idx; 1039 unsigned eng = ring->idx;
1050 unsigned i; 1040 unsigned i;
@@ -1055,7 +1045,6 @@ static void uvd_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
1055 1045
1056 for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) { 1046 for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
1057 struct amdgpu_vmhub *hub = &ring->adev->vmhub[i]; 1047 struct amdgpu_vmhub *hub = &ring->adev->vmhub[i];
1058 uint32_t req = hub->get_invalidate_req(vm_id);
1059 1048
1060 data0 = (hub->ctx0_ptb_addr_hi32 + vm_id * 2) << 2; 1049 data0 = (hub->ctx0_ptb_addr_hi32 + vm_id * 2) << 2;
1061 data1 = upper_32_bits(pd_addr); 1050 data1 = upper_32_bits(pd_addr);
@@ -1091,6 +1080,7 @@ static void uvd_v7_0_enc_ring_insert_end(struct amdgpu_ring *ring)
1091static void uvd_v7_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring, 1080static void uvd_v7_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
1092 unsigned int vm_id, uint64_t pd_addr) 1081 unsigned int vm_id, uint64_t pd_addr)
1093{ 1082{
1083 uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
1094 unsigned eng = ring->idx; 1084 unsigned eng = ring->idx;
1095 unsigned i; 1085 unsigned i;
1096 1086
@@ -1100,7 +1090,6 @@ static void uvd_v7_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
1100 1090
1101 for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) { 1091 for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
1102 struct amdgpu_vmhub *hub = &ring->adev->vmhub[i]; 1092 struct amdgpu_vmhub *hub = &ring->adev->vmhub[i];
1103 uint32_t req = hub->get_invalidate_req(vm_id);
1104 1093
1105 amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WRITE); 1094 amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WRITE);
1106 amdgpu_ring_write(ring, 1095 amdgpu_ring_write(ring,
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
index 49a6c45e65be..47f70827195b 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
@@ -451,11 +451,7 @@ static int vce_v2_0_sw_fini(void *handle)
451 if (r) 451 if (r)
452 return r; 452 return r;
453 453
454 r = amdgpu_vce_sw_fini(adev); 454 return amdgpu_vce_sw_fini(adev);
455 if (r)
456 return r;
457
458 return r;
459} 455}
460 456
461static int vce_v2_0_hw_init(void *handle) 457static int vce_v2_0_hw_init(void *handle)
@@ -495,11 +491,7 @@ static int vce_v2_0_suspend(void *handle)
495 if (r) 491 if (r)
496 return r; 492 return r;
497 493
498 r = amdgpu_vce_suspend(adev); 494 return amdgpu_vce_suspend(adev);
499 if (r)
500 return r;
501
502 return r;
503} 495}
504 496
505static int vce_v2_0_resume(void *handle) 497static int vce_v2_0_resume(void *handle)
@@ -511,11 +503,7 @@ static int vce_v2_0_resume(void *handle)
511 if (r) 503 if (r)
512 return r; 504 return r;
513 505
514 r = vce_v2_0_hw_init(adev); 506 return vce_v2_0_hw_init(adev);
515 if (r)
516 return r;
517
518 return r;
519} 507}
520 508
521static int vce_v2_0_soft_reset(void *handle) 509static int vce_v2_0_soft_reset(void *handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index db0adac073c6..fb0819359909 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -417,11 +417,7 @@ static int vce_v3_0_sw_fini(void *handle)
417 if (r) 417 if (r)
418 return r; 418 return r;
419 419
420 r = amdgpu_vce_sw_fini(adev); 420 return amdgpu_vce_sw_fini(adev);
421 if (r)
422 return r;
423
424 return r;
425} 421}
426 422
427static int vce_v3_0_hw_init(void *handle) 423static int vce_v3_0_hw_init(void *handle)
@@ -471,11 +467,7 @@ static int vce_v3_0_suspend(void *handle)
471 if (r) 467 if (r)
472 return r; 468 return r;
473 469
474 r = amdgpu_vce_suspend(adev); 470 return amdgpu_vce_suspend(adev);
475 if (r)
476 return r;
477
478 return r;
479} 471}
480 472
481static int vce_v3_0_resume(void *handle) 473static int vce_v3_0_resume(void *handle)
@@ -487,11 +479,7 @@ static int vce_v3_0_resume(void *handle)
487 if (r) 479 if (r)
488 return r; 480 return r;
489 481
490 r = vce_v3_0_hw_init(adev); 482 return vce_v3_0_hw_init(adev);
491 if (r)
492 return r;
493
494 return r;
495} 483}
496 484
497static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx) 485static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx)
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
index becc5f744a98..edde5fe938d6 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
@@ -527,11 +527,7 @@ static int vce_v4_0_sw_fini(void *handle)
527 if (r) 527 if (r)
528 return r; 528 return r;
529 529
530 r = amdgpu_vce_sw_fini(adev); 530 return amdgpu_vce_sw_fini(adev);
531 if (r)
532 return r;
533
534 return r;
535} 531}
536 532
537static int vce_v4_0_hw_init(void *handle) 533static int vce_v4_0_hw_init(void *handle)
@@ -584,11 +580,7 @@ static int vce_v4_0_suspend(void *handle)
584 if (r) 580 if (r)
585 return r; 581 return r;
586 582
587 r = amdgpu_vce_suspend(adev); 583 return amdgpu_vce_suspend(adev);
588 if (r)
589 return r;
590
591 return r;
592} 584}
593 585
594static int vce_v4_0_resume(void *handle) 586static int vce_v4_0_resume(void *handle)
@@ -600,11 +592,7 @@ static int vce_v4_0_resume(void *handle)
600 if (r) 592 if (r)
601 return r; 593 return r;
602 594
603 r = vce_v4_0_hw_init(adev); 595 return vce_v4_0_hw_init(adev);
604 if (r)
605 return r;
606
607 return r;
608} 596}
609 597
610static void vce_v4_0_mc_resume(struct amdgpu_device *adev) 598static void vce_v4_0_mc_resume(struct amdgpu_device *adev)
@@ -985,6 +973,7 @@ static void vce_v4_0_ring_insert_end(struct amdgpu_ring *ring)
985static void vce_v4_0_emit_vm_flush(struct amdgpu_ring *ring, 973static void vce_v4_0_emit_vm_flush(struct amdgpu_ring *ring,
986 unsigned int vm_id, uint64_t pd_addr) 974 unsigned int vm_id, uint64_t pd_addr)
987{ 975{
976 uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
988 unsigned eng = ring->idx; 977 unsigned eng = ring->idx;
989 unsigned i; 978 unsigned i;
990 979
@@ -994,7 +983,6 @@ static void vce_v4_0_emit_vm_flush(struct amdgpu_ring *ring,
994 983
995 for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) { 984 for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
996 struct amdgpu_vmhub *hub = &ring->adev->vmhub[i]; 985 struct amdgpu_vmhub *hub = &ring->adev->vmhub[i];
997 uint32_t req = hub->get_invalidate_req(vm_id);
998 986
999 amdgpu_ring_write(ring, VCE_CMD_REG_WRITE); 987 amdgpu_ring_write(ring, VCE_CMD_REG_WRITE);
1000 amdgpu_ring_write(ring, 988 amdgpu_ring_write(ring,
diff --git a/drivers/gpu/drm/amd/amdgpu/vid.h b/drivers/gpu/drm/amd/amdgpu/vid.h
index b3a86e0e96e6..5f2ab9c1609a 100644
--- a/drivers/gpu/drm/amd/amdgpu/vid.h
+++ b/drivers/gpu/drm/amd/amdgpu/vid.h
@@ -362,7 +362,89 @@
362#define PACKET3_WAIT_ON_DE_COUNTER_DIFF 0x88 362#define PACKET3_WAIT_ON_DE_COUNTER_DIFF 0x88
363#define PACKET3_SWITCH_BUFFER 0x8B 363#define PACKET3_SWITCH_BUFFER 0x8B
364#define PACKET3_SET_RESOURCES 0xA0 364#define PACKET3_SET_RESOURCES 0xA0
365/* 1. header
366 * 2. CONTROL
367 * 3. QUEUE_MASK_LO [31:0]
368 * 4. QUEUE_MASK_HI [31:0]
369 * 5. GWS_MASK_LO [31:0]
370 * 6. GWS_MASK_HI [31:0]
371 * 7. OAC_MASK [15:0]
372 * 8. GDS_HEAP_SIZE [16:11] | GDS_HEAP_BASE [5:0]
373 */
374# define PACKET3_SET_RESOURCES_VMID_MASK(x) ((x) << 0)
375# define PACKET3_SET_RESOURCES_UNMAP_LATENTY(x) ((x) << 16)
376# define PACKET3_SET_RESOURCES_QUEUE_TYPE(x) ((x) << 29)
365#define PACKET3_MAP_QUEUES 0xA2 377#define PACKET3_MAP_QUEUES 0xA2
378/* 1. header
379 * 2. CONTROL
380 * 3. CONTROL2
381 * 4. MQD_ADDR_LO [31:0]
382 * 5. MQD_ADDR_HI [31:0]
383 * 6. WPTR_ADDR_LO [31:0]
384 * 7. WPTR_ADDR_HI [31:0]
385 */
386/* CONTROL */
387# define PACKET3_MAP_QUEUES_QUEUE_SEL(x) ((x) << 4)
388# define PACKET3_MAP_QUEUES_VMID(x) ((x) << 8)
389# define PACKET3_MAP_QUEUES_QUEUE_TYPE(x) ((x) << 21)
390# define PACKET3_MAP_QUEUES_ALLOC_FORMAT(x) ((x) << 24)
391# define PACKET3_MAP_QUEUES_ENGINE_SEL(x) ((x) << 26)
392# define PACKET3_MAP_QUEUES_NUM_QUEUES(x) ((x) << 29)
393/* CONTROL2 */
394# define PACKET3_MAP_QUEUES_CHECK_DISABLE(x) ((x) << 1)
395# define PACKET3_MAP_QUEUES_DOORBELL_OFFSET(x) ((x) << 2)
396# define PACKET3_MAP_QUEUES_QUEUE(x) ((x) << 26)
397# define PACKET3_MAP_QUEUES_PIPE(x) ((x) << 29)
398# define PACKET3_MAP_QUEUES_ME(x) ((x) << 31)
399#define PACKET3_UNMAP_QUEUES 0xA3
400/* 1. header
401 * 2. CONTROL
402 * 3. CONTROL2
403 * 4. CONTROL3
404 * 5. CONTROL4
405 * 6. CONTROL5
406 */
407/* CONTROL */
408# define PACKET3_UNMAP_QUEUES_ACTION(x) ((x) << 0)
409 /* 0 - PREEMPT_QUEUES
410 * 1 - RESET_QUEUES
411 * 2 - DISABLE_PROCESS_QUEUES
412 * 3 - PREEMPT_QUEUES_NO_UNMAP
413 */
414# define PACKET3_UNMAP_QUEUES_QUEUE_SEL(x) ((x) << 4)
415# define PACKET3_UNMAP_QUEUES_ENGINE_SEL(x) ((x) << 26)
416# define PACKET3_UNMAP_QUEUES_NUM_QUEUES(x) ((x) << 29)
417/* CONTROL2a */
418# define PACKET3_UNMAP_QUEUES_PASID(x) ((x) << 0)
419/* CONTROL2b */
420# define PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(x) ((x) << 2)
421/* CONTROL3a */
422# define PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET1(x) ((x) << 2)
423/* CONTROL3b */
424# define PACKET3_UNMAP_QUEUES_RB_WPTR(x) ((x) << 0)
425/* CONTROL4 */
426# define PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET2(x) ((x) << 2)
427/* CONTROL5 */
428# define PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET3(x) ((x) << 2)
429#define PACKET3_QUERY_STATUS 0xA4
430/* 1. header
431 * 2. CONTROL
432 * 3. CONTROL2
433 * 4. ADDR_LO [31:0]
434 * 5. ADDR_HI [31:0]
435 * 6. DATA_LO [31:0]
436 * 7. DATA_HI [31:0]
437 */
438/* CONTROL */
439# define PACKET3_QUERY_STATUS_CONTEXT_ID(x) ((x) << 0)
440# define PACKET3_QUERY_STATUS_INTERRUPT_SEL(x) ((x) << 28)
441# define PACKET3_QUERY_STATUS_COMMAND(x) ((x) << 30)
442/* CONTROL2a */
443# define PACKET3_QUERY_STATUS_PASID(x) ((x) << 0)
444/* CONTROL2b */
445# define PACKET3_QUERY_STATUS_DOORBELL_OFFSET(x) ((x) << 2)
446# define PACKET3_QUERY_STATUS_ENG_SEL(x) ((x) << 25)
447
366 448
367#define VCE_CMD_NO_OP 0x00000000 449#define VCE_CMD_NO_OP 0x00000000
368#define VCE_CMD_END 0x00000001 450#define VCE_CMD_END 0x00000001
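The new PACKET3 field macros are plain shifts that OR together into a packet dword. A standalone sketch composing a MAP_QUEUES CONTROL word from them; the macro definitions are copied from the header above, while the chosen field values are illustrative only:

#include <stdint.h>
#include <stdio.h>

#define PACKET3_MAP_QUEUES_QUEUE_SEL(x)    ((x) << 4)
#define PACKET3_MAP_QUEUES_VMID(x)         ((x) << 8)
#define PACKET3_MAP_QUEUES_QUEUE_TYPE(x)   ((x) << 21)
#define PACKET3_MAP_QUEUES_ALLOC_FORMAT(x) ((x) << 24)
#define PACKET3_MAP_QUEUES_ENGINE_SEL(x)   ((x) << 26)
#define PACKET3_MAP_QUEUES_NUM_QUEUES(x)   ((x) << 29)

int main(void)
{
	uint32_t control =
		PACKET3_MAP_QUEUES_QUEUE_SEL(0) |   /* map by queue slot */
		PACKET3_MAP_QUEUES_VMID(0) |
		PACKET3_MAP_QUEUES_QUEUE_TYPE(0) |  /* normal compute queue */
		PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) |
		PACKET3_MAP_QUEUES_ENGINE_SEL(0) |  /* compute engine */
		PACKET3_MAP_QUEUES_NUM_QUEUES(1);

	printf("MAP_QUEUES CONTROL = 0x%08x\n", control);
	return 0;
}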
diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
index dfd4fe6f0578..9da5b0bb66d8 100644
--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
@@ -493,8 +493,10 @@ static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_event event_id,
493 { 493 {
494 enum amd_pm_state_type ps; 494 enum amd_pm_state_type ps;
495 495
496 if (input == NULL) 496 if (input == NULL) {
497 return -EINVAL; 497 ret = -EINVAL;
498 break;
499 }
498 ps = *(unsigned long *)input; 500 ps = *(unsigned long *)input;
499 501
500 data.requested_ui_label = power_state_convert(ps); 502 data.requested_ui_label = power_state_convert(ps);
@@ -539,15 +541,19 @@ static enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle)
539 switch (state->classification.ui_label) { 541 switch (state->classification.ui_label) {
540 case PP_StateUILabel_Battery: 542 case PP_StateUILabel_Battery:
541 pm_type = POWER_STATE_TYPE_BATTERY; 543 pm_type = POWER_STATE_TYPE_BATTERY;
544 break;
542 case PP_StateUILabel_Balanced: 545 case PP_StateUILabel_Balanced:
543 pm_type = POWER_STATE_TYPE_BALANCED; 546 pm_type = POWER_STATE_TYPE_BALANCED;
547 break;
544 case PP_StateUILabel_Performance: 548 case PP_StateUILabel_Performance:
545 pm_type = POWER_STATE_TYPE_PERFORMANCE; 549 pm_type = POWER_STATE_TYPE_PERFORMANCE;
550 break;
546 default: 551 default:
547 if (state->classification.flags & PP_StateClassificationFlag_Boot) 552 if (state->classification.flags & PP_StateClassificationFlag_Boot)
548 pm_type = POWER_STATE_TYPE_INTERNAL_BOOT; 553 pm_type = POWER_STATE_TYPE_INTERNAL_BOOT;
549 else 554 else
550 pm_type = POWER_STATE_TYPE_DEFAULT; 555 pm_type = POWER_STATE_TYPE_DEFAULT;
556 break;
551 } 557 }
552 mutex_unlock(&pp_handle->pp_lock); 558 mutex_unlock(&pp_handle->pp_lock);
553 559
@@ -894,7 +900,7 @@ static int pp_dpm_set_sclk_od(void *handle, uint32_t value)
894 900
895 mutex_lock(&pp_handle->pp_lock); 901 mutex_lock(&pp_handle->pp_lock);
896 ret = hwmgr->hwmgr_func->set_sclk_od(hwmgr, value); 902 ret = hwmgr->hwmgr_func->set_sclk_od(hwmgr, value);
897 mutex_lock(&pp_handle->pp_lock); 903 mutex_unlock(&pp_handle->pp_lock);
898 return ret; 904 return ret;
899} 905}
900 906
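The pp_dpm_set_sclk_od fix above replaces a second mutex_lock() with mutex_unlock(); on a non-recursive kernel mutex the old code self-deadlocked on every call. A userspace sketch of the corrected pairing, with pthreads standing in for the kernel mutex:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t pp_lock = PTHREAD_MUTEX_INITIALIZER;

static int set_sclk_od(int value)
{
	int ret;

	pthread_mutex_lock(&pp_lock);
	ret = value;                    /* stands in for the hwmgr call */
	pthread_mutex_unlock(&pp_lock); /* was erroneously a second lock */
	return ret;
}

int main(void)
{
	printf("ret = %d\n", set_sclk_od(42));
	return 0;
}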
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_pptable.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_pptable.h
index 8e53d3a5e725..6a907c93fd9c 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_pptable.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_pptable.h
@@ -250,6 +250,29 @@ typedef struct _ATOM_Vega10_Fan_Table {
250 USHORT usFanStartTemperature; 250 USHORT usFanStartTemperature;
251} ATOM_Vega10_Fan_Table; 251} ATOM_Vega10_Fan_Table;
252 252
253typedef struct _ATOM_Vega10_Fan_Table_V2 {
254 UCHAR ucRevId;
255 USHORT usFanOutputSensitivity;
256 USHORT usFanAcousticLimitRpm;
257 USHORT usThrottlingRPM;
258 USHORT usTargetTemperature;
259 USHORT usMinimumPWMLimit;
260 USHORT usTargetGfxClk;
261 USHORT usFanGainEdge;
262 USHORT usFanGainHotspot;
263 USHORT usFanGainLiquid;
264 USHORT usFanGainVrVddc;
265 USHORT usFanGainVrMvdd;
266 USHORT usFanGainPlx;
267 USHORT usFanGainHbm;
268 UCHAR ucEnableZeroRPM;
269 USHORT usFanStopTemperature;
270 USHORT usFanStartTemperature;
271 UCHAR ucFanParameters;
272 UCHAR ucFanMinRPM;
273 UCHAR ucFanMaxRPM;
274} ATOM_Vega10_Fan_Table_V2;
275
253typedef struct _ATOM_Vega10_Thermal_Controller { 276typedef struct _ATOM_Vega10_Thermal_Controller {
254 UCHAR ucRevId; 277 UCHAR ucRevId;
255 UCHAR ucType; /* one of ATOM_VEGA10_PP_THERMALCONTROLLER_*/ 278 UCHAR ucType; /* one of ATOM_VEGA10_PP_THERMALCONTROLLER_*/
@@ -305,6 +328,33 @@ typedef struct _ATOM_Vega10_PowerTune_Table {
305 USHORT usTemperatureLimitTedge; 328 USHORT usTemperatureLimitTedge;
306} ATOM_Vega10_PowerTune_Table; 329} ATOM_Vega10_PowerTune_Table;
307 330
331typedef struct _ATOM_Vega10_PowerTune_Table_V2
332{
333 UCHAR ucRevId;
334 USHORT usSocketPowerLimit;
335 USHORT usBatteryPowerLimit;
336 USHORT usSmallPowerLimit;
337 USHORT usTdcLimit;
338 USHORT usEdcLimit;
339 USHORT usSoftwareShutdownTemp;
340 USHORT usTemperatureLimitHotSpot;
341 USHORT usTemperatureLimitLiquid1;
342 USHORT usTemperatureLimitLiquid2;
343 USHORT usTemperatureLimitHBM;
344 USHORT usTemperatureLimitVrSoc;
345 USHORT usTemperatureLimitVrMem;
346 USHORT usTemperatureLimitPlx;
347 USHORT usLoadLineResistance;
348 UCHAR ucLiquid1_I2C_address;
349 UCHAR ucLiquid2_I2C_address;
350 UCHAR ucLiquid_I2C_Line;
351 UCHAR ucVr_I2C_address;
352 UCHAR ucVr_I2C_Line;
353 UCHAR ucPlx_I2C_address;
354 UCHAR ucPlx_I2C_Line;
355 USHORT usTemperatureLimitTedge;
356} ATOM_Vega10_PowerTune_Table_V2;
357
308typedef struct _ATOM_Vega10_Hard_Limit_Record { 358typedef struct _ATOM_Vega10_Hard_Limit_Record {
309 ULONG ulSOCCLKLimit; 359 ULONG ulSOCCLKLimit;
310 ULONG ulGFXCLKLimit; 360 ULONG ulGFXCLKLimit;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c
index 518634f995e7..8b55ae01132d 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c
@@ -116,14 +116,16 @@ static int init_thermal_controller(
116 const ATOM_Vega10_POWERPLAYTABLE *powerplay_table) 116 const ATOM_Vega10_POWERPLAYTABLE *powerplay_table)
117{ 117{
118 const ATOM_Vega10_Thermal_Controller *thermal_controller; 118 const ATOM_Vega10_Thermal_Controller *thermal_controller;
119 const ATOM_Vega10_Fan_Table *fan_table; 119 const Vega10_PPTable_Generic_SubTable_Header *header;
120 const ATOM_Vega10_Fan_Table *fan_table_v1;
121 const ATOM_Vega10_Fan_Table_V2 *fan_table_v2;
120 122
121 thermal_controller = (ATOM_Vega10_Thermal_Controller *) 123 thermal_controller = (ATOM_Vega10_Thermal_Controller *)
122 (((unsigned long)powerplay_table) + 124 (((unsigned long)powerplay_table) +
123 le16_to_cpu(powerplay_table->usThermalControllerOffset)); 125 le16_to_cpu(powerplay_table->usThermalControllerOffset));
124 126
125 PP_ASSERT_WITH_CODE((powerplay_table->usThermalControllerOffset != 0), 127 PP_ASSERT_WITH_CODE((powerplay_table->usThermalControllerOffset != 0),
126 "Thermal controller table not set!", return -1); 128 "Thermal controller table not set!", return -EINVAL);
127 129
128 hwmgr->thermal_controller.ucType = thermal_controller->ucType; 130 hwmgr->thermal_controller.ucType = thermal_controller->ucType;
129 hwmgr->thermal_controller.ucI2cLine = thermal_controller->ucI2cLine; 131 hwmgr->thermal_controller.ucI2cLine = thermal_controller->ucI2cLine;
@@ -142,6 +144,9 @@ static int init_thermal_controller(
142 hwmgr->thermal_controller.fanInfo.ulMaxRPM = 144 hwmgr->thermal_controller.fanInfo.ulMaxRPM =
143 thermal_controller->ucFanMaxRPM * 100UL; 145 thermal_controller->ucFanMaxRPM * 100UL;
144 146
147 hwmgr->thermal_controller.advanceFanControlParameters.ulCycleDelay
148 = 100000;
149
145 set_hw_cap( 150 set_hw_cap(
146 hwmgr, 151 hwmgr,
147 ATOM_VEGA10_PP_THERMALCONTROLLER_NONE != hwmgr->thermal_controller.ucType, 152 ATOM_VEGA10_PP_THERMALCONTROLLER_NONE != hwmgr->thermal_controller.ucType,
@@ -150,54 +155,101 @@ static int init_thermal_controller(
150 if (!powerplay_table->usFanTableOffset) 155 if (!powerplay_table->usFanTableOffset)
151 return 0; 156 return 0;
152 157
153 fan_table = (const ATOM_Vega10_Fan_Table *) 158 header = (const Vega10_PPTable_Generic_SubTable_Header *)
154 (((unsigned long)powerplay_table) + 159 (((unsigned long)powerplay_table) +
155 le16_to_cpu(powerplay_table->usFanTableOffset)); 160 le16_to_cpu(powerplay_table->usFanTableOffset));
156 161
157 PP_ASSERT_WITH_CODE((fan_table->ucRevId >= 8), 162 if (header->ucRevId == 10) {
158 "Invalid Input Fan Table!", return -1); 163 fan_table_v1 = (ATOM_Vega10_Fan_Table *)header;
159 164
160 hwmgr->thermal_controller.advanceFanControlParameters.ulCycleDelay 165 PP_ASSERT_WITH_CODE((fan_table_v1->ucRevId >= 8),
161 = 100000; 166 "Invalid Input Fan Table!", return -EINVAL);
162 phm_cap_set(hwmgr->platform_descriptor.platformCaps, 167
163 PHM_PlatformCaps_MicrocodeFanControl); 168 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
164 169 PHM_PlatformCaps_MicrocodeFanControl);
165 hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity = 170
166 le16_to_cpu(fan_table->usFanOutputSensitivity); 171 hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity =
167 hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM = 172 le16_to_cpu(fan_table_v1->usFanOutputSensitivity);
168 le16_to_cpu(fan_table->usFanRPMMax); 173 hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM =
169 hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMMaxLimit = 174 le16_to_cpu(fan_table_v1->usFanRPMMax);
170 le16_to_cpu(fan_table->usThrottlingRPM); 175 hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMMaxLimit =
171 hwmgr->thermal_controller.advanceFanControlParameters.ulMinFanSCLKAcousticLimit = 176 le16_to_cpu(fan_table_v1->usThrottlingRPM);
172 le32_to_cpu((uint32_t)(fan_table->usFanAcousticLimit)); 177 hwmgr->thermal_controller.advanceFanControlParameters.ulMinFanSCLKAcousticLimit =
173 hwmgr->thermal_controller.advanceFanControlParameters.usTMax = 178 le16_to_cpu(fan_table_v1->usFanAcousticLimit);
174 le16_to_cpu(fan_table->usTargetTemperature); 179 hwmgr->thermal_controller.advanceFanControlParameters.usTMax =
175 hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin = 180 le16_to_cpu(fan_table_v1->usTargetTemperature);
176 le16_to_cpu(fan_table->usMinimumPWMLimit); 181 hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin =
177 hwmgr->thermal_controller.advanceFanControlParameters.ulTargetGfxClk = 182 le16_to_cpu(fan_table_v1->usMinimumPWMLimit);
178 le32_to_cpu((uint32_t)(fan_table->usTargetGfxClk)); 183 hwmgr->thermal_controller.advanceFanControlParameters.ulTargetGfxClk =
179 hwmgr->thermal_controller.advanceFanControlParameters.usFanGainEdge = 184 le16_to_cpu(fan_table_v1->usTargetGfxClk);
180 le16_to_cpu(fan_table->usFanGainEdge); 185 hwmgr->thermal_controller.advanceFanControlParameters.usFanGainEdge =
181 hwmgr->thermal_controller.advanceFanControlParameters.usFanGainHotspot = 186 le16_to_cpu(fan_table_v1->usFanGainEdge);
182 le16_to_cpu(fan_table->usFanGainHotspot); 187 hwmgr->thermal_controller.advanceFanControlParameters.usFanGainHotspot =
183 hwmgr->thermal_controller.advanceFanControlParameters.usFanGainLiquid = 188 le16_to_cpu(fan_table_v1->usFanGainHotspot);
184 le16_to_cpu(fan_table->usFanGainLiquid); 189 hwmgr->thermal_controller.advanceFanControlParameters.usFanGainLiquid =
185 hwmgr->thermal_controller.advanceFanControlParameters.usFanGainVrVddc = 190 le16_to_cpu(fan_table_v1->usFanGainLiquid);
186 le16_to_cpu(fan_table->usFanGainVrVddc); 191 hwmgr->thermal_controller.advanceFanControlParameters.usFanGainVrVddc =
187 hwmgr->thermal_controller.advanceFanControlParameters.usFanGainVrMvdd = 192 le16_to_cpu(fan_table_v1->usFanGainVrVddc);
188 le16_to_cpu(fan_table->usFanGainVrMvdd); 193 hwmgr->thermal_controller.advanceFanControlParameters.usFanGainVrMvdd =
189 hwmgr->thermal_controller.advanceFanControlParameters.usFanGainPlx = 194 le16_to_cpu(fan_table_v1->usFanGainVrMvdd);
190 le16_to_cpu(fan_table->usFanGainPlx); 195 hwmgr->thermal_controller.advanceFanControlParameters.usFanGainPlx =
191 hwmgr->thermal_controller.advanceFanControlParameters.usFanGainHbm = 196 le16_to_cpu(fan_table_v1->usFanGainPlx);
192 le16_to_cpu(fan_table->usFanGainHbm); 197 hwmgr->thermal_controller.advanceFanControlParameters.usFanGainHbm =
193 198 le16_to_cpu(fan_table_v1->usFanGainHbm);
194 hwmgr->thermal_controller.advanceFanControlParameters.ucEnableZeroRPM = 199
195 fan_table->ucEnableZeroRPM; 200 hwmgr->thermal_controller.advanceFanControlParameters.ucEnableZeroRPM =
196 hwmgr->thermal_controller.advanceFanControlParameters.usZeroRPMStopTemperature = 201 fan_table_v1->ucEnableZeroRPM;
197 le16_to_cpu(fan_table->usFanStopTemperature); 202 hwmgr->thermal_controller.advanceFanControlParameters.usZeroRPMStopTemperature =
198 hwmgr->thermal_controller.advanceFanControlParameters.usZeroRPMStartTemperature = 203 le16_to_cpu(fan_table_v1->usFanStopTemperature);
199 le16_to_cpu(fan_table->usFanStartTemperature); 204 hwmgr->thermal_controller.advanceFanControlParameters.usZeroRPMStartTemperature =
205 le16_to_cpu(fan_table_v1->usFanStartTemperature);
206 } else if (header->ucRevId > 10) {
207 fan_table_v2 = (ATOM_Vega10_Fan_Table_V2 *)header;
208
209 hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution =
210 fan_table_v2->ucFanParameters & ATOM_VEGA10_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK;
211 hwmgr->thermal_controller.fanInfo.ulMinRPM = fan_table_v2->ucFanMinRPM * 100UL;
212 hwmgr->thermal_controller.fanInfo.ulMaxRPM = fan_table_v2->ucFanMaxRPM * 100UL;
200 213
214 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
215 PHM_PlatformCaps_MicrocodeFanControl);
216
217 hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity =
218 le16_to_cpu(fan_table_v2->usFanOutputSensitivity);
219 hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM =
220 fan_table_v2->ucFanMaxRPM * 100UL;
221 hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMMaxLimit =
222 le16_to_cpu(fan_table_v2->usThrottlingRPM);
223 hwmgr->thermal_controller.advanceFanControlParameters.ulMinFanSCLKAcousticLimit =
224 le16_to_cpu(fan_table_v2->usFanAcousticLimitRpm);
225 hwmgr->thermal_controller.advanceFanControlParameters.usTMax =
226 le16_to_cpu(fan_table_v2->usTargetTemperature);
227 hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin =
228 le16_to_cpu(fan_table_v2->usMinimumPWMLimit);
229 hwmgr->thermal_controller.advanceFanControlParameters.ulTargetGfxClk =
230 le16_to_cpu(fan_table_v2->usTargetGfxClk);
231 hwmgr->thermal_controller.advanceFanControlParameters.usFanGainEdge =
232 le16_to_cpu(fan_table_v2->usFanGainEdge);
233 hwmgr->thermal_controller.advanceFanControlParameters.usFanGainHotspot =
234 le16_to_cpu(fan_table_v2->usFanGainHotspot);
235 hwmgr->thermal_controller.advanceFanControlParameters.usFanGainLiquid =
236 le16_to_cpu(fan_table_v2->usFanGainLiquid);
237 hwmgr->thermal_controller.advanceFanControlParameters.usFanGainVrVddc =
238 le16_to_cpu(fan_table_v2->usFanGainVrVddc);
239 hwmgr->thermal_controller.advanceFanControlParameters.usFanGainVrMvdd =
240 le16_to_cpu(fan_table_v2->usFanGainVrMvdd);
241 hwmgr->thermal_controller.advanceFanControlParameters.usFanGainPlx =
242 le16_to_cpu(fan_table_v2->usFanGainPlx);
243 hwmgr->thermal_controller.advanceFanControlParameters.usFanGainHbm =
244 le16_to_cpu(fan_table_v2->usFanGainHbm);
245
246 hwmgr->thermal_controller.advanceFanControlParameters.ucEnableZeroRPM =
247 fan_table_v2->ucEnableZeroRPM;
248 hwmgr->thermal_controller.advanceFanControlParameters.usZeroRPMStopTemperature =
249 le16_to_cpu(fan_table_v2->usFanStopTemperature);
250 hwmgr->thermal_controller.advanceFanControlParameters.usZeroRPMStartTemperature =
251 le16_to_cpu(fan_table_v2->usFanStartTemperature);
252 }
201 return 0; 253 return 0;
202} 254}
203 255
@@ -261,6 +313,48 @@ static int get_mm_clock_voltage_table(
261 return 0; 313 return 0;
262} 314}
263 315
316static void get_scl_sda_value(uint8_t line, uint8_t *scl, uint8_t *sda)
317{
318	switch (line) {
319 case Vega10_I2CLineID_DDC1:
320 *scl = Vega10_I2C_DDC1CLK;
321 *sda = Vega10_I2C_DDC1DATA;
322 break;
323 case Vega10_I2CLineID_DDC2:
324 *scl = Vega10_I2C_DDC2CLK;
325 *sda = Vega10_I2C_DDC2DATA;
326 break;
327 case Vega10_I2CLineID_DDC3:
328 *scl = Vega10_I2C_DDC3CLK;
329 *sda = Vega10_I2C_DDC3DATA;
330 break;
331 case Vega10_I2CLineID_DDC4:
332 *scl = Vega10_I2C_DDC4CLK;
333 *sda = Vega10_I2C_DDC4DATA;
334 break;
335 case Vega10_I2CLineID_DDC5:
336 *scl = Vega10_I2C_DDC5CLK;
337 *sda = Vega10_I2C_DDC5DATA;
338 break;
339 case Vega10_I2CLineID_DDC6:
340 *scl = Vega10_I2C_DDC6CLK;
341 *sda = Vega10_I2C_DDC6DATA;
342 break;
343 case Vega10_I2CLineID_SCLSDA:
344 *scl = Vega10_I2C_SCL;
345 *sda = Vega10_I2C_SDA;
346 break;
347 case Vega10_I2CLineID_DDCVGA:
348 *scl = Vega10_I2C_DDCVGACLK;
349 *sda = Vega10_I2C_DDCVGADATA;
350 break;
351 default:
352 *scl = 0;
353 *sda = 0;
354 break;
355 }
356}
357
264static int get_tdp_table( 358static int get_tdp_table(
265 struct pp_hwmgr *hwmgr, 359 struct pp_hwmgr *hwmgr,
266 struct phm_tdp_table **info_tdp_table, 360 struct phm_tdp_table **info_tdp_table,
@@ -268,59 +362,99 @@ static int get_tdp_table(
268{ 362{
269 uint32_t table_size; 363 uint32_t table_size;
270 struct phm_tdp_table *tdp_table; 364 struct phm_tdp_table *tdp_table;
271 365 uint8_t scl;
272 const ATOM_Vega10_PowerTune_Table *power_tune_table = 366 uint8_t sda;
273 (ATOM_Vega10_PowerTune_Table *)table; 367 const ATOM_Vega10_PowerTune_Table *power_tune_table;
274 368 const ATOM_Vega10_PowerTune_Table_V2 *power_tune_table_v2;
275 table_size = sizeof(uint32_t) + sizeof(struct phm_cac_tdp_table);
276 hwmgr->dyn_state.cac_dtp_table = (struct phm_cac_tdp_table *)
277 kzalloc(table_size, GFP_KERNEL);
278
279 if (!hwmgr->dyn_state.cac_dtp_table)
280 return -ENOMEM;
281 369
282 table_size = sizeof(uint32_t) + sizeof(struct phm_tdp_table); 370 table_size = sizeof(uint32_t) + sizeof(struct phm_tdp_table);
371
283 tdp_table = kzalloc(table_size, GFP_KERNEL); 372 tdp_table = kzalloc(table_size, GFP_KERNEL);
284 373
285 if (!tdp_table) { 374 if (!tdp_table)
286 kfree(hwmgr->dyn_state.cac_dtp_table);
287 hwmgr->dyn_state.cac_dtp_table = NULL;
288 return -ENOMEM; 375 return -ENOMEM;
289 }
290 376
291 tdp_table->usMaximumPowerDeliveryLimit = le16_to_cpu(power_tune_table->usSocketPowerLimit); 377 if (table->ucRevId == 5) {
292 tdp_table->usTDC = le16_to_cpu(power_tune_table->usTdcLimit); 378 power_tune_table = (ATOM_Vega10_PowerTune_Table *)table;
293 tdp_table->usEDCLimit = le16_to_cpu(power_tune_table->usEdcLimit); 379 tdp_table->usMaximumPowerDeliveryLimit = le16_to_cpu(power_tune_table->usSocketPowerLimit);
294 tdp_table->usSoftwareShutdownTemp = 380 tdp_table->usTDC = le16_to_cpu(power_tune_table->usTdcLimit);
295 le16_to_cpu(power_tune_table->usSoftwareShutdownTemp); 381 tdp_table->usEDCLimit = le16_to_cpu(power_tune_table->usEdcLimit);
296 tdp_table->usTemperatureLimitTedge = 382 tdp_table->usSoftwareShutdownTemp =
297 le16_to_cpu(power_tune_table->usTemperatureLimitTedge); 383 le16_to_cpu(power_tune_table->usSoftwareShutdownTemp);
298 tdp_table->usTemperatureLimitHotspot = 384 tdp_table->usTemperatureLimitTedge =
299 le16_to_cpu(power_tune_table->usTemperatureLimitHotSpot); 385 le16_to_cpu(power_tune_table->usTemperatureLimitTedge);
300 tdp_table->usTemperatureLimitLiquid1 = 386 tdp_table->usTemperatureLimitHotspot =
301 le16_to_cpu(power_tune_table->usTemperatureLimitLiquid1); 387 le16_to_cpu(power_tune_table->usTemperatureLimitHotSpot);
302 tdp_table->usTemperatureLimitLiquid2 = 388 tdp_table->usTemperatureLimitLiquid1 =
303 le16_to_cpu(power_tune_table->usTemperatureLimitLiquid2); 389 le16_to_cpu(power_tune_table->usTemperatureLimitLiquid1);
304 tdp_table->usTemperatureLimitHBM = 390 tdp_table->usTemperatureLimitLiquid2 =
305 le16_to_cpu(power_tune_table->usTemperatureLimitHBM); 391 le16_to_cpu(power_tune_table->usTemperatureLimitLiquid2);
306 tdp_table->usTemperatureLimitVrVddc = 392 tdp_table->usTemperatureLimitHBM =
307 le16_to_cpu(power_tune_table->usTemperatureLimitVrSoc); 393 le16_to_cpu(power_tune_table->usTemperatureLimitHBM);
308 tdp_table->usTemperatureLimitVrMvdd = 394 tdp_table->usTemperatureLimitVrVddc =
309 le16_to_cpu(power_tune_table->usTemperatureLimitVrMem); 395 le16_to_cpu(power_tune_table->usTemperatureLimitVrSoc);
310 tdp_table->usTemperatureLimitPlx = 396 tdp_table->usTemperatureLimitVrMvdd =
311 le16_to_cpu(power_tune_table->usTemperatureLimitPlx); 397 le16_to_cpu(power_tune_table->usTemperatureLimitVrMem);
312 tdp_table->ucLiquid1_I2C_address = power_tune_table->ucLiquid1_I2C_address; 398 tdp_table->usTemperatureLimitPlx =
313 tdp_table->ucLiquid2_I2C_address = power_tune_table->ucLiquid2_I2C_address; 399 le16_to_cpu(power_tune_table->usTemperatureLimitPlx);
314 tdp_table->ucLiquid_I2C_Line = power_tune_table->ucLiquid_I2C_LineSCL; 400 tdp_table->ucLiquid1_I2C_address = power_tune_table->ucLiquid1_I2C_address;
315 tdp_table->ucLiquid_I2C_LineSDA = power_tune_table->ucLiquid_I2C_LineSDA; 401 tdp_table->ucLiquid2_I2C_address = power_tune_table->ucLiquid2_I2C_address;
316 tdp_table->ucVr_I2C_address = power_tune_table->ucVr_I2C_address; 402 tdp_table->ucLiquid_I2C_Line = power_tune_table->ucLiquid_I2C_LineSCL;
317 tdp_table->ucVr_I2C_Line = power_tune_table->ucVr_I2C_LineSCL; 403 tdp_table->ucLiquid_I2C_LineSDA = power_tune_table->ucLiquid_I2C_LineSDA;
318 tdp_table->ucVr_I2C_LineSDA = power_tune_table->ucVr_I2C_LineSDA; 404 tdp_table->ucVr_I2C_address = power_tune_table->ucVr_I2C_address;
319 tdp_table->ucPlx_I2C_address = power_tune_table->ucPlx_I2C_address; 405 tdp_table->ucVr_I2C_Line = power_tune_table->ucVr_I2C_LineSCL;
320 tdp_table->ucPlx_I2C_Line = power_tune_table->ucPlx_I2C_LineSCL; 406 tdp_table->ucVr_I2C_LineSDA = power_tune_table->ucVr_I2C_LineSDA;
321 tdp_table->ucPlx_I2C_LineSDA = power_tune_table->ucPlx_I2C_LineSDA; 407 tdp_table->ucPlx_I2C_address = power_tune_table->ucPlx_I2C_address;
322 408 tdp_table->ucPlx_I2C_Line = power_tune_table->ucPlx_I2C_LineSCL;
323 hwmgr->platform_descriptor.LoadLineSlope = power_tune_table->usLoadLineResistance; 409 tdp_table->ucPlx_I2C_LineSDA = power_tune_table->ucPlx_I2C_LineSDA;
410 hwmgr->platform_descriptor.LoadLineSlope = power_tune_table->usLoadLineResistance;
411 } else {
412 power_tune_table_v2 = (ATOM_Vega10_PowerTune_Table_V2 *)table;
413 tdp_table->usMaximumPowerDeliveryLimit = le16_to_cpu(power_tune_table_v2->usSocketPowerLimit);
414 tdp_table->usTDC = le16_to_cpu(power_tune_table_v2->usTdcLimit);
415 tdp_table->usEDCLimit = le16_to_cpu(power_tune_table_v2->usEdcLimit);
416 tdp_table->usSoftwareShutdownTemp =
417 le16_to_cpu(power_tune_table_v2->usSoftwareShutdownTemp);
418 tdp_table->usTemperatureLimitTedge =
419 le16_to_cpu(power_tune_table_v2->usTemperatureLimitTedge);
420 tdp_table->usTemperatureLimitHotspot =
421 le16_to_cpu(power_tune_table_v2->usTemperatureLimitHotSpot);
422 tdp_table->usTemperatureLimitLiquid1 =
423 le16_to_cpu(power_tune_table_v2->usTemperatureLimitLiquid1);
424 tdp_table->usTemperatureLimitLiquid2 =
425 le16_to_cpu(power_tune_table_v2->usTemperatureLimitLiquid2);
426 tdp_table->usTemperatureLimitHBM =
427 le16_to_cpu(power_tune_table_v2->usTemperatureLimitHBM);
428 tdp_table->usTemperatureLimitVrVddc =
429 le16_to_cpu(power_tune_table_v2->usTemperatureLimitVrSoc);
430 tdp_table->usTemperatureLimitVrMvdd =
431 le16_to_cpu(power_tune_table_v2->usTemperatureLimitVrMem);
432 tdp_table->usTemperatureLimitPlx =
433 le16_to_cpu(power_tune_table_v2->usTemperatureLimitPlx);
434 tdp_table->ucLiquid1_I2C_address = power_tune_table_v2->ucLiquid1_I2C_address;
435 tdp_table->ucLiquid2_I2C_address = power_tune_table_v2->ucLiquid2_I2C_address;
436
437 get_scl_sda_value(power_tune_table_v2->ucLiquid_I2C_Line, &scl, &sda);
438
439 tdp_table->ucLiquid_I2C_Line = scl;
440 tdp_table->ucLiquid_I2C_LineSDA = sda;
441
442 tdp_table->ucVr_I2C_address = power_tune_table_v2->ucVr_I2C_address;
443
444 get_scl_sda_value(power_tune_table_v2->ucVr_I2C_Line, &scl, &sda);
445
446 tdp_table->ucVr_I2C_Line = scl;
447 tdp_table->ucVr_I2C_LineSDA = sda;
448 tdp_table->ucPlx_I2C_address = power_tune_table_v2->ucPlx_I2C_address;
449
450 get_scl_sda_value(power_tune_table_v2->ucPlx_I2C_Line, &scl, &sda);
451
452 tdp_table->ucPlx_I2C_Line = scl;
453 tdp_table->ucPlx_I2C_LineSDA = sda;
454
455 hwmgr->platform_descriptor.LoadLineSlope =
456 power_tune_table_v2->usLoadLineResistance;
457 }
324 458
325 *info_tdp_table = tdp_table; 459 *info_tdp_table = tdp_table;
326 460
@@ -836,7 +970,7 @@ static int init_dpm_2_parameters(
836 (((unsigned long)powerplay_table) + 970 (((unsigned long)powerplay_table) +
837 le16_to_cpu(powerplay_table->usVddcLookupTableOffset)); 971 le16_to_cpu(powerplay_table->usVddcLookupTableOffset));
838 result = get_vddc_lookup_table(hwmgr, 972 result = get_vddc_lookup_table(hwmgr,
839 &pp_table_info->vddc_lookup_table, vddc_table, 16); 973 &pp_table_info->vddc_lookup_table, vddc_table, 8);
840 } 974 }
841 975
842 if (powerplay_table->usVddmemLookupTableOffset) { 976 if (powerplay_table->usVddmemLookupTableOffset) {
@@ -845,7 +979,7 @@ static int init_dpm_2_parameters(
845 (((unsigned long)powerplay_table) + 979 (((unsigned long)powerplay_table) +
846 le16_to_cpu(powerplay_table->usVddmemLookupTableOffset)); 980 le16_to_cpu(powerplay_table->usVddmemLookupTableOffset));
847 result = get_vddc_lookup_table(hwmgr, 981 result = get_vddc_lookup_table(hwmgr,
848 &pp_table_info->vddmem_lookup_table, vdd_mem_table, 16); 982 &pp_table_info->vddmem_lookup_table, vdd_mem_table, 4);
849 } 983 }
850 984
851 if (powerplay_table->usVddciLookupTableOffset) { 985 if (powerplay_table->usVddciLookupTableOffset) {
@@ -854,7 +988,7 @@ static int init_dpm_2_parameters(
854 (((unsigned long)powerplay_table) + 988 (((unsigned long)powerplay_table) +
855 le16_to_cpu(powerplay_table->usVddciLookupTableOffset)); 989 le16_to_cpu(powerplay_table->usVddciLookupTableOffset));
856 result = get_vddc_lookup_table(hwmgr, 990 result = get_vddc_lookup_table(hwmgr,
857 &pp_table_info->vddci_lookup_table, vddci_table, 16); 991 &pp_table_info->vddci_lookup_table, vddci_table, 4);
858 } 992 }
859 993
860 return result; 994 return result;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.h
index 995d133ba6aa..d83ed2af7aa3 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.h
@@ -26,6 +26,34 @@
26 26
27#include "hwmgr.h" 27#include "hwmgr.h"
28 28
29enum Vega10_I2CLineID {
30 Vega10_I2CLineID_DDC1 = 0x90,
31 Vega10_I2CLineID_DDC2 = 0x91,
32 Vega10_I2CLineID_DDC3 = 0x92,
33 Vega10_I2CLineID_DDC4 = 0x93,
34 Vega10_I2CLineID_DDC5 = 0x94,
35 Vega10_I2CLineID_DDC6 = 0x95,
36 Vega10_I2CLineID_SCLSDA = 0x96,
37 Vega10_I2CLineID_DDCVGA = 0x97
38};
39
40#define Vega10_I2C_DDC1DATA 0
41#define Vega10_I2C_DDC1CLK 1
42#define Vega10_I2C_DDC2DATA 2
43#define Vega10_I2C_DDC2CLK 3
44#define Vega10_I2C_DDC3DATA 4
45#define Vega10_I2C_DDC3CLK 5
46#define Vega10_I2C_SDA 40
47#define Vega10_I2C_SCL 41
48#define Vega10_I2C_DDC4DATA 65
49#define Vega10_I2C_DDC4CLK 66
50#define Vega10_I2C_DDC5DATA 0x48
51#define Vega10_I2C_DDC5CLK 0x49
52#define Vega10_I2C_DDC6DATA 0x4a
53#define Vega10_I2C_DDC6CLK 0x4b
54#define Vega10_I2C_DDCVGADATA 0x4c
55#define Vega10_I2C_DDCVGACLK 0x4d
56
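
A minimal sketch of how these line IDs and pin defines pair up. The
get_scl_sda_value() helper called from the power-tune parsing above is not
part of this hunk, so the mapping below is a hypothetical illustration only:

static void example_scl_sda_value(uint8_t line, uint8_t *scl, uint8_t *sda)
{
	switch (line) {
	case Vega10_I2CLineID_DDC1:
		*scl = Vega10_I2C_DDC1CLK;
		*sda = Vega10_I2C_DDC1DATA;
		break;
	case Vega10_I2CLineID_SCLSDA:
		*scl = Vega10_I2C_SCL;
		*sda = Vega10_I2C_SDA;
		break;
	/* the remaining DDC lines follow the same CLK/DATA pattern */
	default:
		*scl = *sda = 0;
		break;
	}
}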
29extern const struct pp_table_func vega10_pptable_funcs; 57extern const struct pp_table_func vega10_pptable_funcs;
30extern int vega10_get_number_of_powerplay_table_entries(struct pp_hwmgr *hwmgr); 58extern int vega10_get_number_of_powerplay_table_entries(struct pp_hwmgr *hwmgr);
31extern int vega10_get_powerplay_table_entry(struct pp_hwmgr *hwmgr, uint32_t entry_index, 59extern int vega10_get_powerplay_table_entry(struct pp_hwmgr *hwmgr, uint32_t entry_index,
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu9_driver_if.h b/drivers/gpu/drm/amd/powerplay/inc/smu9_driver_if.h
index aee021451d35..2037910adcb1 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu9_driver_if.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu9_driver_if.h
@@ -30,7 +30,9 @@
30 * SMU TEAM: Always increment the interface version if 30 * SMU TEAM: Always increment the interface version if
31 * any structure is changed in this file 31 * any structure is changed in this file
32 */ 32 */
33#define SMU9_DRIVER_IF_VERSION 0xa 33#define SMU9_DRIVER_IF_VERSION 0xB
34
35#define PPTABLE_V10_SMU_VERSION 1
34 36
35#define NUM_GFXCLK_DPM_LEVELS 8 37#define NUM_GFXCLK_DPM_LEVELS 8
36#define NUM_UVD_DPM_LEVELS 8 38#define NUM_UVD_DPM_LEVELS 8
@@ -87,6 +89,11 @@ typedef struct {
87 int32_t a0; 89 int32_t a0;
88 int32_t a1; 90 int32_t a1;
89 int32_t a2; 91 int32_t a2;
92
93 uint8_t a0_shift;
94 uint8_t a1_shift;
95 uint8_t a2_shift;
96 uint8_t padding;
90} GbVdroopTable_t; 97} GbVdroopTable_t;
91 98
92typedef struct { 99typedef struct {
@@ -293,7 +300,9 @@ typedef struct {
293 uint16_t Platform_sigma; 300 uint16_t Platform_sigma;
294 uint16_t PSM_Age_CompFactor; 301 uint16_t PSM_Age_CompFactor;
295 302
296 uint32_t Reserved[20]; 303 uint32_t DpmLevelPowerDelta;
304
305 uint32_t Reserved[19];
297 306
298 /* Padding - ignore */ 307 /* Padding - ignore */
299 uint32_t MmHubPadding[7]; /* SMU internal use */ 308 uint32_t MmHubPadding[7]; /* SMU internal use */
@@ -350,8 +359,8 @@ typedef struct {
350typedef struct { 359typedef struct {
351 uint16_t avgPsmCount[30]; 360 uint16_t avgPsmCount[30];
352 uint16_t minPsmCount[30]; 361 uint16_t minPsmCount[30];
353 uint16_t avgPsmVoltage[30]; /* in mV with 2 fractional bits */ 362 float avgPsmVoltage[30];
354 uint16_t minPsmVoltage[30]; /* in mV with 2 fractional bits */ 363 float minPsmVoltage[30];
355 364
356 uint32_t MmHubPadding[7]; /* SMU internal use */ 365 uint32_t MmHubPadding[7]; /* SMU internal use */
357} AvfsDebugTable_t; 366} AvfsDebugTable_t;
@@ -414,5 +423,45 @@ typedef struct {
414#define UCLK_SWITCH_SLOW 0 423#define UCLK_SWITCH_SLOW 0
415#define UCLK_SWITCH_FAST 1 424#define UCLK_SWITCH_FAST 1
416 425
426/* GFX DIDT Configuration */
427#define SQ_Enable_MASK 0x1
428#define SQ_IR_MASK 0x2
429#define SQ_PCC_MASK 0x4
430#define SQ_EDC_MASK 0x8
431
432#define TCP_Enable_MASK 0x100
433#define TCP_IR_MASK 0x200
434#define TCP_PCC_MASK 0x400
435#define TCP_EDC_MASK 0x800
436
437#define TD_Enable_MASK 0x10000
438#define TD_IR_MASK 0x20000
439#define TD_PCC_MASK 0x40000
440#define TD_EDC_MASK 0x80000
441
442#define DB_Enable_MASK 0x1000000
443#define DB_IR_MASK 0x2000000
444#define DB_PCC_MASK 0x4000000
445#define DB_EDC_MASK 0x8000000
446
447#define SQ_Enable_SHIFT 0
448#define SQ_IR_SHIFT 1
449#define SQ_PCC_SHIFT 2
450#define SQ_EDC_SHIFT 3
451
452#define TCP_Enable_SHIFT 8
453#define TCP_IR_SHIFT 9
454#define TCP_PCC_SHIFT 10
455#define TCP_EDC_SHIFT 11
456
457#define TD_Enable_SHIFT 16
458#define TD_IR_SHIFT 17
459#define TD_PCC_SHIFT 18
460#define TD_EDC_SHIFT 19
461
462#define DB_Enable_SHIFT 24
463#define DB_IR_SHIFT 25
464#define DB_PCC_SHIFT 26
465#define DB_EDC_SHIFT 27
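
Each DIDT block (SQ, TCP, TD, DB) occupies a four-bit field of one
configuration word; a short decoding example using the masks and shifts above
(the configuration value is made up for illustration):

	uint32_t didt_cfg = 0x00000808;	/* hypothetical: EDC on for SQ and TCP */
	uint32_t tcp_edc = (didt_cfg & TCP_EDC_MASK) >> TCP_EDC_SHIFT;	/* 1 */
	uint32_t td_pcc = (didt_cfg & TD_PCC_MASK) >> TD_PCC_SHIFT;	/* 0 */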
417 466
418#endif 467#endif
diff --git a/drivers/gpu/drm/arm/hdlcd_drv.c b/drivers/gpu/drm/arm/hdlcd_drv.c
index 0e74880b5e94..0f49c4b12772 100644
--- a/drivers/gpu/drm/arm/hdlcd_drv.c
+++ b/drivers/gpu/drm/arm/hdlcd_drv.c
@@ -392,29 +392,13 @@ static int compare_dev(struct device *dev, void *data)
392 392
393static int hdlcd_probe(struct platform_device *pdev) 393static int hdlcd_probe(struct platform_device *pdev)
394{ 394{
395 struct device_node *port, *ep; 395 struct device_node *port;
396 struct component_match *match = NULL; 396 struct component_match *match = NULL;
397 397
398 if (!pdev->dev.of_node)
399 return -ENODEV;
400
401 /* there is only one output port inside each device, find it */ 398 /* there is only one output port inside each device, find it */
402 ep = of_graph_get_next_endpoint(pdev->dev.of_node, NULL); 399 port = of_graph_get_remote_node(pdev->dev.of_node, 0, 0);
403 if (!ep) 400 if (!port)
404 return -ENODEV;
405
406 if (!of_device_is_available(ep)) {
407 of_node_put(ep);
408 return -ENODEV; 401 return -ENODEV;
409 }
410
411 /* add the remote encoder port as component */
412 port = of_graph_get_remote_port_parent(ep);
413 of_node_put(ep);
414 if (!port || !of_device_is_available(port)) {
415 of_node_put(port);
416 return -EAGAIN;
417 }
418 402
419 drm_of_component_match_add(&pdev->dev, &match, compare_dev, port); 403 drm_of_component_match_add(&pdev->dev, &match, compare_dev, port);
420 of_node_put(port); 404 of_node_put(port);
diff --git a/drivers/gpu/drm/arm/malidp_crtc.c b/drivers/gpu/drm/arm/malidp_crtc.c
index f9d665550d3e..9446a673d469 100644
--- a/drivers/gpu/drm/arm/malidp_crtc.c
+++ b/drivers/gpu/drm/arm/malidp_crtc.c
@@ -16,6 +16,7 @@
16#include <drm/drm_crtc.h> 16#include <drm/drm_crtc.h>
17#include <drm/drm_crtc_helper.h> 17#include <drm/drm_crtc_helper.h>
18#include <linux/clk.h> 18#include <linux/clk.h>
19#include <linux/pm_runtime.h>
19#include <video/videomode.h> 20#include <video/videomode.h>
20 21
21#include "malidp_drv.h" 22#include "malidp_drv.h"
@@ -35,13 +36,6 @@ static bool malidp_crtc_mode_fixup(struct drm_crtc *crtc,
35 long rate, req_rate = mode->crtc_clock * 1000; 36 long rate, req_rate = mode->crtc_clock * 1000;
36 37
37 if (req_rate) { 38 if (req_rate) {
38 rate = clk_round_rate(hwdev->mclk, req_rate);
39 if (rate < req_rate) {
40 DRM_DEBUG_DRIVER("mclk clock unable to reach %d kHz\n",
41 mode->crtc_clock);
42 return false;
43 }
44
45 rate = clk_round_rate(hwdev->pxlclk, req_rate); 39 rate = clk_round_rate(hwdev->pxlclk, req_rate);
46 if (rate != req_rate) { 40 if (rate != req_rate) {
47 DRM_DEBUG_DRIVER("pxlclk doesn't support %ld Hz\n", 41 DRM_DEBUG_DRIVER("pxlclk doesn't support %ld Hz\n",
@@ -58,9 +52,14 @@ static void malidp_crtc_enable(struct drm_crtc *crtc)
58 struct malidp_drm *malidp = crtc_to_malidp_device(crtc); 52 struct malidp_drm *malidp = crtc_to_malidp_device(crtc);
59 struct malidp_hw_device *hwdev = malidp->dev; 53 struct malidp_hw_device *hwdev = malidp->dev;
60 struct videomode vm; 54 struct videomode vm;
55 int err = pm_runtime_get_sync(crtc->dev->dev);
61 56
62 drm_display_mode_to_videomode(&crtc->state->adjusted_mode, &vm); 57 if (err < 0) {
58 DRM_DEBUG_DRIVER("Failed to enable runtime power management: %d\n", err);
59 return;
60 }
63 61
62 drm_display_mode_to_videomode(&crtc->state->adjusted_mode, &vm);
64 clk_prepare_enable(hwdev->pxlclk); 63 clk_prepare_enable(hwdev->pxlclk);
65 64
66 /* We rely on firmware to set mclk to a sensible level. */ 65 /* We rely on firmware to set mclk to a sensible level. */
@@ -75,10 +74,254 @@ static void malidp_crtc_disable(struct drm_crtc *crtc)
75{ 74{
76 struct malidp_drm *malidp = crtc_to_malidp_device(crtc); 75 struct malidp_drm *malidp = crtc_to_malidp_device(crtc);
77 struct malidp_hw_device *hwdev = malidp->dev; 76 struct malidp_hw_device *hwdev = malidp->dev;
77 int err;
78 78
79 drm_crtc_vblank_off(crtc); 79 drm_crtc_vblank_off(crtc);
80 hwdev->enter_config_mode(hwdev); 80 hwdev->enter_config_mode(hwdev);
81 clk_disable_unprepare(hwdev->pxlclk); 81 clk_disable_unprepare(hwdev->pxlclk);
82
83 err = pm_runtime_put(crtc->dev->dev);
84 if (err < 0) {
85 DRM_DEBUG_DRIVER("Failed to disable runtime power management: %d\n", err);
86 }
87}
88
89static const struct gamma_curve_segment {
90 u16 start;
91 u16 end;
92} segments[MALIDP_COEFFTAB_NUM_COEFFS] = {
93 /* sector 0 */
94 { 0, 0 }, { 1, 1 }, { 2, 2 }, { 3, 3 },
95 { 4, 4 }, { 5, 5 }, { 6, 6 }, { 7, 7 },
96 { 8, 8 }, { 9, 9 }, { 10, 10 }, { 11, 11 },
97 { 12, 12 }, { 13, 13 }, { 14, 14 }, { 15, 15 },
98 /* sector 1 */
99 { 16, 19 }, { 20, 23 }, { 24, 27 }, { 28, 31 },
100 /* sector 2 */
101 { 32, 39 }, { 40, 47 }, { 48, 55 }, { 56, 63 },
102 /* sector 3 */
103 { 64, 79 }, { 80, 95 }, { 96, 111 }, { 112, 127 },
104 /* sector 4 */
105 { 128, 159 }, { 160, 191 }, { 192, 223 }, { 224, 255 },
106 /* sector 5 */
107 { 256, 319 }, { 320, 383 }, { 384, 447 }, { 448, 511 },
108 /* sector 6 */
109 { 512, 639 }, { 640, 767 }, { 768, 895 }, { 896, 1023 },
110 { 1024, 1151 }, { 1152, 1279 }, { 1280, 1407 }, { 1408, 1535 },
111 { 1536, 1663 }, { 1664, 1791 }, { 1792, 1919 }, { 1920, 2047 },
112 { 2048, 2175 }, { 2176, 2303 }, { 2304, 2431 }, { 2432, 2559 },
113 { 2560, 2687 }, { 2688, 2815 }, { 2816, 2943 }, { 2944, 3071 },
114 { 3072, 3199 }, { 3200, 3327 }, { 3328, 3455 }, { 3456, 3583 },
115 { 3584, 3711 }, { 3712, 3839 }, { 3840, 3967 }, { 3968, 4095 },
116};
117
118#define DE_COEFTAB_DATA(a, b) ((((a) & 0xfff) << 16) | (((b) & 0xfff)))
119
120static void malidp_generate_gamma_table(struct drm_property_blob *lut_blob,
121 u32 coeffs[MALIDP_COEFFTAB_NUM_COEFFS])
122{
123 struct drm_color_lut *lut = (struct drm_color_lut *)lut_blob->data;
124 int i;
125
126 for (i = 0; i < MALIDP_COEFFTAB_NUM_COEFFS; ++i) {
127 u32 a, b, delta_in, out_start, out_end;
128
129 delta_in = segments[i].end - segments[i].start;
130 /* DP has 12-bit internal precision for its LUTs. */
131 out_start = drm_color_lut_extract(lut[segments[i].start].green,
132 12);
133 out_end = drm_color_lut_extract(lut[segments[i].end].green, 12);
134 a = (delta_in == 0) ? 0 : ((out_end - out_start) * 256) / delta_in;
135 b = out_start;
136 coeffs[i] = DE_COEFTAB_DATA(a, b);
137 }
138}
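
A worked instance of the encoding above, taking segment { 16, 19 }
(delta_in = 3) with assumed 12-bit green samples out_start = 0x100 and
out_end = 0x106 (values illustrative, not from the patch):

	u32 a = ((0x106 - 0x100) * 256) / 3;	/* 0x200: slope with 8 fractional bits */
	u32 c = DE_COEFTAB_DATA(a, 0x100);	/* ((0x200 & 0xfff) << 16) | 0x100 = 0x02000100 */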
139
140/*
141 * Check if there is a new gamma LUT and if it is of an acceptable size. Also,
142 * reject any LUTs that use distinct red, green, and blue curves.
143 */
144static int malidp_crtc_atomic_check_gamma(struct drm_crtc *crtc,
145 struct drm_crtc_state *state)
146{
147 struct malidp_crtc_state *mc = to_malidp_crtc_state(state);
148 struct drm_color_lut *lut;
149 size_t lut_size;
150 int i;
151
152 if (!state->color_mgmt_changed || !state->gamma_lut)
153 return 0;
154
155 if (crtc->state->gamma_lut &&
156 (crtc->state->gamma_lut->base.id == state->gamma_lut->base.id))
157 return 0;
158
159 if (state->gamma_lut->length % sizeof(struct drm_color_lut))
160 return -EINVAL;
161
162 lut_size = state->gamma_lut->length / sizeof(struct drm_color_lut);
163 if (lut_size != MALIDP_GAMMA_LUT_SIZE)
164 return -EINVAL;
165
166 lut = (struct drm_color_lut *)state->gamma_lut->data;
167 for (i = 0; i < lut_size; ++i)
168 if (!((lut[i].red == lut[i].green) &&
169 (lut[i].red == lut[i].blue)))
170 return -EINVAL;
171
172 if (!state->mode_changed) {
173 int ret;
174
175 state->mode_changed = true;
176 /*
177 * Kerneldoc for drm_atomic_helper_check_modeset mandates that
178 * it be invoked when the driver sets ->mode_changed. Since
179 * changing the gamma LUT doesn't depend on any external
180 * resources, it is safe to call it only once.
181 */
182 ret = drm_atomic_helper_check_modeset(crtc->dev, state->state);
183 if (ret)
184 return ret;
185 }
186
187 malidp_generate_gamma_table(state->gamma_lut, mc->gamma_coeffs);
188 return 0;
189}
190
191/*
192 * Check if there is a new CTM and if it contains valid input. Valid here means
193 * that the number is inside the representable range for a Q3.12 number,
 194 * aside from the truncation of the fractional part of the input data.
195 *
196 * The COLORADJ registers can be changed atomically.
197 */
198static int malidp_crtc_atomic_check_ctm(struct drm_crtc *crtc,
199 struct drm_crtc_state *state)
200{
201 struct malidp_crtc_state *mc = to_malidp_crtc_state(state);
202 struct drm_color_ctm *ctm;
203 int i;
204
205 if (!state->color_mgmt_changed)
206 return 0;
207
208 if (!state->ctm)
209 return 0;
210
211 if (crtc->state->ctm && (crtc->state->ctm->base.id ==
212 state->ctm->base.id))
213 return 0;
214
215 /*
216 * The size of the ctm is checked in
217 * drm_atomic_replace_property_blob_from_id.
218 */
219 ctm = (struct drm_color_ctm *)state->ctm->data;
220 for (i = 0; i < ARRAY_SIZE(ctm->matrix); ++i) {
221 /* Convert from S31.32 to Q3.12. */
222 s64 val = ctm->matrix[i];
223 u32 mag = ((((u64)val) & ~BIT_ULL(63)) >> 20) &
224 GENMASK_ULL(14, 0);
225
226 /*
227 * Convert to 2s complement and check the destination's top bit
228 * for overflow. NB: Can't check before converting or it'd
229 * incorrectly reject the case:
230 * sign == 1
231 * mag == 0x2000
232 */
233 if (val & BIT_ULL(63))
234 mag = ~mag + 1;
235 if (!!(val & BIT_ULL(63)) != !!(mag & BIT(14)))
236 return -EINVAL;
237 mc->coloradj_coeffs[i] = mag;
238 }
239
240 return 0;
241}
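
To make the range check above concrete: the 15-bit result keeps 3 integer and
12 fractional bits, so Q3.12 covers [-4, 4). Two sample sign-magnitude S31.32
entries:

	/* 1.0: magnitude 0x100000000, mag = (0x100000000 >> 20) & 0x7fff = 0x1000. */
	/* The sign bit (0) matches mag bit 14 (0), so the entry is accepted.       */

	/* 4.0: magnitude 0x400000000, mag = 0x4000 has bit 14 set while the sign   */
	/* bit is clear, so the entry is rejected with -EINVAL.                     */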
242
243static int malidp_crtc_atomic_check_scaling(struct drm_crtc *crtc,
244 struct drm_crtc_state *state)
245{
246 struct malidp_drm *malidp = crtc_to_malidp_device(crtc);
247 struct malidp_hw_device *hwdev = malidp->dev;
248 struct malidp_crtc_state *cs = to_malidp_crtc_state(state);
249 struct malidp_se_config *s = &cs->scaler_config;
250 struct drm_plane *plane;
251 struct videomode vm;
252 const struct drm_plane_state *pstate;
253 u32 h_upscale_factor = 0; /* U16.16 */
254 u32 v_upscale_factor = 0; /* U16.16 */
255 u8 scaling = cs->scaled_planes_mask;
256 int ret;
257
258 if (!scaling) {
259 s->scale_enable = false;
260 goto mclk_calc;
261 }
262
263 /* The scaling engine can only handle one plane at a time. */
264 if (scaling & (scaling - 1))
265 return -EINVAL;
266
267 drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) {
268 struct malidp_plane *mp = to_malidp_plane(plane);
269 u32 phase;
270
271 if (!(mp->layer->id & scaling))
272 continue;
273
274 /*
275 * Convert crtc_[w|h] to U32.32, then divide by U16.16 src_[w|h]
276 * to get the U16.16 result.
277 */
278 h_upscale_factor = div_u64((u64)pstate->crtc_w << 32,
279 pstate->src_w);
280 v_upscale_factor = div_u64((u64)pstate->crtc_h << 32,
281 pstate->src_h);
282
283 s->enhancer_enable = ((h_upscale_factor >> 16) >= 2 ||
284 (v_upscale_factor >> 16) >= 2);
285
286 s->input_w = pstate->src_w >> 16;
287 s->input_h = pstate->src_h >> 16;
288 s->output_w = pstate->crtc_w;
289 s->output_h = pstate->crtc_h;
290
291#define SE_N_PHASE 4
292#define SE_SHIFT_N_PHASE 12
293 /* Calculate initial_phase and delta_phase for horizontal. */
294 phase = s->input_w;
295 s->h_init_phase =
296 ((phase << SE_N_PHASE) / s->output_w + 1) / 2;
297
298 phase = s->input_w;
299 phase <<= (SE_SHIFT_N_PHASE + SE_N_PHASE);
300 s->h_delta_phase = phase / s->output_w;
301
302 /* Same for vertical. */
303 phase = s->input_h;
304 s->v_init_phase =
305 ((phase << SE_N_PHASE) / s->output_h + 1) / 2;
306
307 phase = s->input_h;
308 phase <<= (SE_SHIFT_N_PHASE + SE_N_PHASE);
309 s->v_delta_phase = phase / s->output_h;
310#undef SE_N_PHASE
311#undef SE_SHIFT_N_PHASE
312 s->plane_src_id = mp->layer->id;
313 }
314
315 s->scale_enable = true;
316 s->hcoeff = malidp_se_select_coeffs(h_upscale_factor);
317 s->vcoeff = malidp_se_select_coeffs(v_upscale_factor);
318
319mclk_calc:
320 drm_display_mode_to_videomode(&state->adjusted_mode, &vm);
321 ret = hwdev->se_calc_mclk(hwdev, s, &vm);
322 if (ret < 0)
323 return -EINVAL;
324 return 0;
82} 325}
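
A worked pass through the scaler math above, upscaling a 1280-wide source
plane onto a 1920-wide CRTC (DRM supplies src_w in U16.16, i.e. 1280 << 16;
the geometry is illustrative):

	/* h_upscale_factor = (1920ull << 32) / (1280 << 16) = 0x18000, 1.5 in U16.16 */
	/* enhancer stays off: the integer part (1) is below 2                        */
	/* h_init_phase  = ((1280 << 4) / 1920 + 1) / 2 = (10 + 1) / 2 = 5            */
	/* h_delta_phase = (1280 << (12 + 4)) / 1920 = 0xaaaa                         */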
83 326
84static int malidp_crtc_atomic_check(struct drm_crtc *crtc, 327static int malidp_crtc_atomic_check(struct drm_crtc *crtc,
@@ -90,6 +333,7 @@ static int malidp_crtc_atomic_check(struct drm_crtc *crtc,
90 const struct drm_plane_state *pstate; 333 const struct drm_plane_state *pstate;
91 u32 rot_mem_free, rot_mem_usable; 334 u32 rot_mem_free, rot_mem_usable;
92 int rotated_planes = 0; 335 int rotated_planes = 0;
336 int ret;
93 337
94 /* 338 /*
95 * check if there is enough rotation memory available for planes 339 * check if there is enough rotation memory available for planes
@@ -156,7 +400,11 @@ static int malidp_crtc_atomic_check(struct drm_crtc *crtc,
156 } 400 }
157 } 401 }
158 402
159 return 0; 403 ret = malidp_crtc_atomic_check_gamma(crtc, state);
404 ret = ret ? ret : malidp_crtc_atomic_check_ctm(crtc, state);
405 ret = ret ? ret : malidp_crtc_atomic_check_scaling(crtc, state);
406
407 return ret;
160} 408}
161 409
162static const struct drm_crtc_helper_funcs malidp_crtc_helper_funcs = { 410static const struct drm_crtc_helper_funcs malidp_crtc_helper_funcs = {
@@ -166,6 +414,60 @@ static const struct drm_crtc_helper_funcs malidp_crtc_helper_funcs = {
166 .atomic_check = malidp_crtc_atomic_check, 414 .atomic_check = malidp_crtc_atomic_check,
167}; 415};
168 416
417static struct drm_crtc_state *malidp_crtc_duplicate_state(struct drm_crtc *crtc)
418{
419 struct malidp_crtc_state *state, *old_state;
420
421 if (WARN_ON(!crtc->state))
422 return NULL;
423
424 old_state = to_malidp_crtc_state(crtc->state);
425 state = kmalloc(sizeof(*state), GFP_KERNEL);
426 if (!state)
427 return NULL;
428
429 __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
430 memcpy(state->gamma_coeffs, old_state->gamma_coeffs,
431 sizeof(state->gamma_coeffs));
432 memcpy(state->coloradj_coeffs, old_state->coloradj_coeffs,
433 sizeof(state->coloradj_coeffs));
434 memcpy(&state->scaler_config, &old_state->scaler_config,
435 sizeof(state->scaler_config));
436 state->scaled_planes_mask = 0;
437
438 return &state->base;
439}
440
441static void malidp_crtc_reset(struct drm_crtc *crtc)
442{
443 struct malidp_crtc_state *state = NULL;
444
445 if (crtc->state) {
446 state = to_malidp_crtc_state(crtc->state);
447 __drm_atomic_helper_crtc_destroy_state(crtc->state);
448 }
449
450 kfree(state);
451 state = kzalloc(sizeof(*state), GFP_KERNEL);
452 if (state) {
453 crtc->state = &state->base;
454 crtc->state->crtc = crtc;
455 }
456}
457
458static void malidp_crtc_destroy_state(struct drm_crtc *crtc,
459 struct drm_crtc_state *state)
460{
461 struct malidp_crtc_state *mali_state = NULL;
462
463 if (state) {
464 mali_state = to_malidp_crtc_state(state);
465 __drm_atomic_helper_crtc_destroy_state(state);
466 }
467
468 kfree(mali_state);
469}
470
169static int malidp_crtc_enable_vblank(struct drm_crtc *crtc) 471static int malidp_crtc_enable_vblank(struct drm_crtc *crtc)
170{ 472{
171 struct malidp_drm *malidp = crtc_to_malidp_device(crtc); 473 struct malidp_drm *malidp = crtc_to_malidp_device(crtc);
@@ -186,12 +488,13 @@ static void malidp_crtc_disable_vblank(struct drm_crtc *crtc)
186} 488}
187 489
188static const struct drm_crtc_funcs malidp_crtc_funcs = { 490static const struct drm_crtc_funcs malidp_crtc_funcs = {
491 .gamma_set = drm_atomic_helper_legacy_gamma_set,
189 .destroy = drm_crtc_cleanup, 492 .destroy = drm_crtc_cleanup,
190 .set_config = drm_atomic_helper_set_config, 493 .set_config = drm_atomic_helper_set_config,
191 .page_flip = drm_atomic_helper_page_flip, 494 .page_flip = drm_atomic_helper_page_flip,
192 .reset = drm_atomic_helper_crtc_reset, 495 .reset = malidp_crtc_reset,
193 .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state, 496 .atomic_duplicate_state = malidp_crtc_duplicate_state,
194 .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, 497 .atomic_destroy_state = malidp_crtc_destroy_state,
195 .enable_vblank = malidp_crtc_enable_vblank, 498 .enable_vblank = malidp_crtc_enable_vblank,
196 .disable_vblank = malidp_crtc_disable_vblank, 499 .disable_vblank = malidp_crtc_disable_vblank,
197}; 500};
@@ -223,11 +526,17 @@ int malidp_crtc_init(struct drm_device *drm)
223 526
224 ret = drm_crtc_init_with_planes(drm, &malidp->crtc, primary, NULL, 527 ret = drm_crtc_init_with_planes(drm, &malidp->crtc, primary, NULL,
225 &malidp_crtc_funcs, NULL); 528 &malidp_crtc_funcs, NULL);
529 if (ret)
530 goto crtc_cleanup_planes;
226 531
227 if (!ret) { 532 drm_crtc_helper_add(&malidp->crtc, &malidp_crtc_helper_funcs);
228 drm_crtc_helper_add(&malidp->crtc, &malidp_crtc_helper_funcs); 533 drm_mode_crtc_set_gamma_size(&malidp->crtc, MALIDP_GAMMA_LUT_SIZE);
229 return 0; 534 /* No inverse-gamma: it is per-plane. */
230 } 535 drm_crtc_enable_color_mgmt(&malidp->crtc, 0, true, MALIDP_GAMMA_LUT_SIZE);
536
537 malidp_se_set_enh_coeffs(malidp->dev);
538
539 return 0;
231 540
232crtc_cleanup_planes: 541crtc_cleanup_planes:
233 malidp_de_planes_destroy(drm); 542 malidp_de_planes_destroy(drm);
diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c
index ea2546f766c2..0d3eb537d08b 100644
--- a/drivers/gpu/drm/arm/malidp_drv.c
+++ b/drivers/gpu/drm/arm/malidp_drv.c
@@ -13,9 +13,11 @@
13#include <linux/module.h> 13#include <linux/module.h>
14#include <linux/clk.h> 14#include <linux/clk.h>
15#include <linux/component.h> 15#include <linux/component.h>
16#include <linux/console.h>
16#include <linux/of_device.h> 17#include <linux/of_device.h>
17#include <linux/of_graph.h> 18#include <linux/of_graph.h>
18#include <linux/of_reserved_mem.h> 19#include <linux/of_reserved_mem.h>
20#include <linux/pm_runtime.h>
19 21
20#include <drm/drmP.h> 22#include <drm/drmP.h>
21#include <drm/drm_atomic.h> 23#include <drm/drm_atomic.h>
@@ -32,6 +34,131 @@
32 34
33#define MALIDP_CONF_VALID_TIMEOUT 250 35#define MALIDP_CONF_VALID_TIMEOUT 250
34 36
37static void malidp_write_gamma_table(struct malidp_hw_device *hwdev,
38 u32 data[MALIDP_COEFFTAB_NUM_COEFFS])
39{
40 int i;
41 /* Update all channels with a single gamma curve. */
42 const u32 gamma_write_mask = GENMASK(18, 16);
43 /*
44 * Always write an entire table, so the address field in
45 * DE_COEFFTAB_ADDR is 0 and we can use the gamma_write_mask bitmask
46 * directly.
47 */
48 malidp_hw_write(hwdev, gamma_write_mask,
49 hwdev->map.coeffs_base + MALIDP_COEF_TABLE_ADDR);
50 for (i = 0; i < MALIDP_COEFFTAB_NUM_COEFFS; ++i)
51 malidp_hw_write(hwdev, data[i],
52 hwdev->map.coeffs_base +
53 MALIDP_COEF_TABLE_DATA);
54}
55
56static void malidp_atomic_commit_update_gamma(struct drm_crtc *crtc,
57 struct drm_crtc_state *old_state)
58{
59 struct malidp_drm *malidp = crtc_to_malidp_device(crtc);
60 struct malidp_hw_device *hwdev = malidp->dev;
61
62 if (!crtc->state->color_mgmt_changed)
63 return;
64
65 if (!crtc->state->gamma_lut) {
66 malidp_hw_clearbits(hwdev,
67 MALIDP_DISP_FUNC_GAMMA,
68 MALIDP_DE_DISPLAY_FUNC);
69 } else {
70 struct malidp_crtc_state *mc =
71 to_malidp_crtc_state(crtc->state);
72
73 if (!old_state->gamma_lut || (crtc->state->gamma_lut->base.id !=
74 old_state->gamma_lut->base.id))
75 malidp_write_gamma_table(hwdev, mc->gamma_coeffs);
76
77 malidp_hw_setbits(hwdev, MALIDP_DISP_FUNC_GAMMA,
78 MALIDP_DE_DISPLAY_FUNC);
79 }
80}
81
82static
83void malidp_atomic_commit_update_coloradj(struct drm_crtc *crtc,
84 struct drm_crtc_state *old_state)
85{
86 struct malidp_drm *malidp = crtc_to_malidp_device(crtc);
87 struct malidp_hw_device *hwdev = malidp->dev;
88 int i;
89
90 if (!crtc->state->color_mgmt_changed)
91 return;
92
93 if (!crtc->state->ctm) {
94 malidp_hw_clearbits(hwdev, MALIDP_DISP_FUNC_CADJ,
95 MALIDP_DE_DISPLAY_FUNC);
96 } else {
97 struct malidp_crtc_state *mc =
98 to_malidp_crtc_state(crtc->state);
99
100 if (!old_state->ctm || (crtc->state->ctm->base.id !=
101 old_state->ctm->base.id))
102 for (i = 0; i < MALIDP_COLORADJ_NUM_COEFFS; ++i)
103 malidp_hw_write(hwdev,
104 mc->coloradj_coeffs[i],
105 hwdev->map.coeffs_base +
106 MALIDP_COLOR_ADJ_COEF + 4 * i);
107
108 malidp_hw_setbits(hwdev, MALIDP_DISP_FUNC_CADJ,
109 MALIDP_DE_DISPLAY_FUNC);
110 }
111}
112
113static void malidp_atomic_commit_se_config(struct drm_crtc *crtc,
114 struct drm_crtc_state *old_state)
115{
116 struct malidp_crtc_state *cs = to_malidp_crtc_state(crtc->state);
117 struct malidp_crtc_state *old_cs = to_malidp_crtc_state(old_state);
118 struct malidp_drm *malidp = crtc_to_malidp_device(crtc);
119 struct malidp_hw_device *hwdev = malidp->dev;
120 struct malidp_se_config *s = &cs->scaler_config;
121 struct malidp_se_config *old_s = &old_cs->scaler_config;
122 u32 se_control = hwdev->map.se_base +
123 ((hwdev->map.features & MALIDP_REGMAP_HAS_CLEARIRQ) ?
124 0x10 : 0xC);
125 u32 layer_control = se_control + MALIDP_SE_LAYER_CONTROL;
126 u32 scr = se_control + MALIDP_SE_SCALING_CONTROL;
127 u32 val;
128
129 /* Set SE_CONTROL */
130 if (!s->scale_enable) {
131 val = malidp_hw_read(hwdev, se_control);
132 val &= ~MALIDP_SE_SCALING_EN;
133 malidp_hw_write(hwdev, val, se_control);
134 return;
135 }
136
137 hwdev->se_set_scaling_coeffs(hwdev, s, old_s);
138 val = malidp_hw_read(hwdev, se_control);
139 val |= MALIDP_SE_SCALING_EN | MALIDP_SE_ALPHA_EN;
140
141 val &= ~MALIDP_SE_ENH(MALIDP_SE_ENH_MASK);
142 val |= s->enhancer_enable ? MALIDP_SE_ENH(3) : 0;
143
144 val |= MALIDP_SE_RGBO_IF_EN;
145 malidp_hw_write(hwdev, val, se_control);
146
147 /* Set IN_SIZE & OUT_SIZE. */
148 val = MALIDP_SE_SET_V_SIZE(s->input_h) |
149 MALIDP_SE_SET_H_SIZE(s->input_w);
150 malidp_hw_write(hwdev, val, layer_control + MALIDP_SE_L0_IN_SIZE);
151 val = MALIDP_SE_SET_V_SIZE(s->output_h) |
152 MALIDP_SE_SET_H_SIZE(s->output_w);
153 malidp_hw_write(hwdev, val, layer_control + MALIDP_SE_L0_OUT_SIZE);
154
155 /* Set phase regs. */
156 malidp_hw_write(hwdev, s->h_init_phase, scr + MALIDP_SE_H_INIT_PH);
157 malidp_hw_write(hwdev, s->h_delta_phase, scr + MALIDP_SE_H_DELTA_PH);
158 malidp_hw_write(hwdev, s->v_init_phase, scr + MALIDP_SE_V_INIT_PH);
159 malidp_hw_write(hwdev, s->v_delta_phase, scr + MALIDP_SE_V_DELTA_PH);
160}
161
35/* 162/*
36 * set the "config valid" bit and wait until the hardware acts on it 163 * set the "config valid" bit and wait until the hardware acts on it
37 */ 164 */
@@ -66,10 +193,12 @@ static void malidp_atomic_commit_hw_done(struct drm_atomic_state *state)
66 struct drm_pending_vblank_event *event; 193 struct drm_pending_vblank_event *event;
67 struct drm_device *drm = state->dev; 194 struct drm_device *drm = state->dev;
68 struct malidp_drm *malidp = drm->dev_private; 195 struct malidp_drm *malidp = drm->dev_private;
69 int ret = malidp_set_and_wait_config_valid(drm);
70 196
71 if (ret) 197 if (malidp->crtc.enabled) {
72 DRM_DEBUG_DRIVER("timed out waiting for updated configuration\n"); 198 /* only set config_valid if the CRTC is enabled */
199 if (malidp_set_and_wait_config_valid(drm))
200 DRM_DEBUG_DRIVER("timed out waiting for updated configuration\n");
201 }
73 202
74 event = malidp->crtc.state->event; 203 event = malidp->crtc.state->event;
75 if (event) { 204 if (event) {
@@ -88,15 +217,30 @@ static void malidp_atomic_commit_hw_done(struct drm_atomic_state *state)
88static void malidp_atomic_commit_tail(struct drm_atomic_state *state) 217static void malidp_atomic_commit_tail(struct drm_atomic_state *state)
89{ 218{
90 struct drm_device *drm = state->dev; 219 struct drm_device *drm = state->dev;
220 struct drm_crtc *crtc;
221 struct drm_crtc_state *old_crtc_state;
222 int i;
223
224 pm_runtime_get_sync(drm->dev);
91 225
92 drm_atomic_helper_commit_modeset_disables(drm, state); 226 drm_atomic_helper_commit_modeset_disables(drm, state);
93 drm_atomic_helper_commit_modeset_enables(drm, state); 227
228 for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
229 malidp_atomic_commit_update_gamma(crtc, old_crtc_state);
230 malidp_atomic_commit_update_coloradj(crtc, old_crtc_state);
231 malidp_atomic_commit_se_config(crtc, old_crtc_state);
232 }
233
94 drm_atomic_helper_commit_planes(drm, state, 0); 234 drm_atomic_helper_commit_planes(drm, state, 0);
95 235
236 drm_atomic_helper_commit_modeset_enables(drm, state);
237
96 malidp_atomic_commit_hw_done(state); 238 malidp_atomic_commit_hw_done(state);
97 239
98 drm_atomic_helper_wait_for_vblanks(drm, state); 240 drm_atomic_helper_wait_for_vblanks(drm, state);
99 241
242 pm_runtime_put(drm->dev);
243
100 drm_atomic_helper_cleanup_planes(drm, state); 244 drm_atomic_helper_cleanup_planes(drm, state);
101} 245}
102 246
@@ -277,13 +421,69 @@ static bool malidp_has_sufficient_address_space(const struct resource *res,
277 return true; 421 return true;
278} 422}
279 423
424static ssize_t core_id_show(struct device *dev, struct device_attribute *attr,
425 char *buf)
426{
427 struct drm_device *drm = dev_get_drvdata(dev);
428 struct malidp_drm *malidp = drm->dev_private;
429
430 return snprintf(buf, PAGE_SIZE, "%08x\n", malidp->core_id);
431}
432
 433static DEVICE_ATTR_RO(core_id);
434
435static int malidp_init_sysfs(struct device *dev)
436{
437 int ret = device_create_file(dev, &dev_attr_core_id);
438
439 if (ret)
440 DRM_ERROR("failed to create device file for core_id\n");
441
442 return ret;
443}
444
445static void malidp_fini_sysfs(struct device *dev)
446{
447 device_remove_file(dev, &dev_attr_core_id);
448}
449
280#define MAX_OUTPUT_CHANNELS 3 450#define MAX_OUTPUT_CHANNELS 3
281 451
452static int malidp_runtime_pm_suspend(struct device *dev)
453{
454 struct drm_device *drm = dev_get_drvdata(dev);
455 struct malidp_drm *malidp = drm->dev_private;
456 struct malidp_hw_device *hwdev = malidp->dev;
457
458 /* we can only suspend if the hardware is in config mode */
459 WARN_ON(!hwdev->in_config_mode(hwdev));
460
461 hwdev->pm_suspended = true;
462 clk_disable_unprepare(hwdev->mclk);
463 clk_disable_unprepare(hwdev->aclk);
464 clk_disable_unprepare(hwdev->pclk);
465
466 return 0;
467}
468
469static int malidp_runtime_pm_resume(struct device *dev)
470{
471 struct drm_device *drm = dev_get_drvdata(dev);
472 struct malidp_drm *malidp = drm->dev_private;
473 struct malidp_hw_device *hwdev = malidp->dev;
474
475 clk_prepare_enable(hwdev->pclk);
476 clk_prepare_enable(hwdev->aclk);
477 clk_prepare_enable(hwdev->mclk);
478 hwdev->pm_suspended = false;
479
480 return 0;
481}
482
282static int malidp_bind(struct device *dev) 483static int malidp_bind(struct device *dev)
283{ 484{
284 struct resource *res; 485 struct resource *res;
285 struct drm_device *drm; 486 struct drm_device *drm;
286 struct device_node *ep;
287 struct malidp_drm *malidp; 487 struct malidp_drm *malidp;
288 struct malidp_hw_device *hwdev; 488 struct malidp_hw_device *hwdev;
289 struct platform_device *pdev = to_platform_device(dev); 489 struct platform_device *pdev = to_platform_device(dev);
@@ -308,7 +508,6 @@ static int malidp_bind(struct device *dev)
308 memcpy(hwdev, of_device_get_match_data(dev), sizeof(*hwdev)); 508 memcpy(hwdev, of_device_get_match_data(dev), sizeof(*hwdev));
309 malidp->dev = hwdev; 509 malidp->dev = hwdev;
310 510
311
312 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 511 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
313 hwdev->regs = devm_ioremap_resource(dev, res); 512 hwdev->regs = devm_ioremap_resource(dev, res);
314 if (IS_ERR(hwdev->regs)) 513 if (IS_ERR(hwdev->regs))
@@ -341,14 +540,17 @@ static int malidp_bind(struct device *dev)
341 goto alloc_fail; 540 goto alloc_fail;
342 } 541 }
343 542
344 /* Enable APB clock in order to get access to the registers */ 543 drm->dev_private = malidp;
345 clk_prepare_enable(hwdev->pclk); 544 dev_set_drvdata(dev, drm);
346 /* 545
347 * Enable AXI clock and main clock so that prefetch can start once 546 /* Enable power management */
348 * the registers are set 547 pm_runtime_enable(dev);
349 */ 548
350 clk_prepare_enable(hwdev->aclk); 549 /* Resume device to enable the clocks */
351 clk_prepare_enable(hwdev->mclk); 550 if (pm_runtime_enabled(dev))
551 pm_runtime_get_sync(dev);
552 else
553 malidp_runtime_pm_resume(dev);
352 554
353 dev_id = of_match_device(malidp_drm_of_match, dev); 555 dev_id = of_match_device(malidp_drm_of_match, dev);
354 if (!dev_id) { 556 if (!dev_id) {
@@ -377,6 +579,8 @@ static int malidp_bind(struct device *dev)
377 DRM_INFO("found ARM Mali-DP%3x version r%dp%d\n", version >> 16, 579 DRM_INFO("found ARM Mali-DP%3x version r%dp%d\n", version >> 16,
378 (version >> 12) & 0xf, (version >> 8) & 0xf); 580 (version >> 12) & 0xf, (version >> 8) & 0xf);
379 581
582 malidp->core_id = version;
583
380 /* set the number of lines used for output of RGB data */ 584 /* set the number of lines used for output of RGB data */
381 ret = of_property_read_u8_array(dev->of_node, 585 ret = of_property_read_u8_array(dev->of_node,
382 "arm,malidp-output-port-lines", 586 "arm,malidp-output-port-lines",
@@ -388,22 +592,19 @@ static int malidp_bind(struct device *dev)
388 out_depth = (out_depth << 8) | (output_width[i] & 0xf); 592 out_depth = (out_depth << 8) | (output_width[i] & 0xf);
389 malidp_hw_write(hwdev, out_depth, hwdev->map.out_depth_base); 593 malidp_hw_write(hwdev, out_depth, hwdev->map.out_depth_base);
390 594
391 drm->dev_private = malidp;
392 dev_set_drvdata(dev, drm);
393 atomic_set(&malidp->config_valid, 0); 595 atomic_set(&malidp->config_valid, 0);
394 init_waitqueue_head(&malidp->wq); 596 init_waitqueue_head(&malidp->wq);
395 597
396 ret = malidp_init(drm); 598 ret = malidp_init(drm);
397 if (ret < 0) 599 if (ret < 0)
600 goto query_hw_fail;
601
602 ret = malidp_init_sysfs(dev);
603 if (ret)
398 goto init_fail; 604 goto init_fail;
399 605
400 /* Set the CRTC's port so that the encoder component can find it */ 606 /* Set the CRTC's port so that the encoder component can find it */
401 ep = of_graph_get_next_endpoint(dev->of_node, NULL); 607 malidp->crtc.port = of_graph_get_port_by_id(dev->of_node, 0);
402 if (!ep) {
403 ret = -EINVAL;
404 goto port_fail;
405 }
406 malidp->crtc.port = of_get_next_parent(ep);
407 608
408 ret = component_bind_all(dev, drm); 609 ret = component_bind_all(dev, drm);
409 if (ret) { 610 if (ret) {
@@ -422,6 +623,7 @@ static int malidp_bind(struct device *dev)
422 DRM_ERROR("failed to initialise vblank\n"); 623 DRM_ERROR("failed to initialise vblank\n");
423 goto vblank_fail; 624 goto vblank_fail;
424 } 625 }
626 pm_runtime_put(dev);
425 627
426 drm_mode_config_reset(drm); 628 drm_mode_config_reset(drm);
427 629
@@ -447,7 +649,9 @@ register_fail:
447 drm_fbdev_cma_fini(malidp->fbdev); 649 drm_fbdev_cma_fini(malidp->fbdev);
448 malidp->fbdev = NULL; 650 malidp->fbdev = NULL;
449 } 651 }
652 drm_kms_helper_poll_fini(drm);
450fbdev_fail: 653fbdev_fail:
654 pm_runtime_get_sync(dev);
451 drm_vblank_cleanup(drm); 655 drm_vblank_cleanup(drm);
452vblank_fail: 656vblank_fail:
453 malidp_se_irq_fini(drm); 657 malidp_se_irq_fini(drm);
@@ -458,15 +662,17 @@ irq_init_fail:
458bind_fail: 662bind_fail:
459 of_node_put(malidp->crtc.port); 663 of_node_put(malidp->crtc.port);
460 malidp->crtc.port = NULL; 664 malidp->crtc.port = NULL;
461port_fail:
462 malidp_fini(drm);
463init_fail: 665init_fail:
666 malidp_fini_sysfs(dev);
667 malidp_fini(drm);
668query_hw_fail:
669 pm_runtime_put(dev);
670 if (pm_runtime_enabled(dev))
671 pm_runtime_disable(dev);
672 else
673 malidp_runtime_pm_suspend(dev);
464 drm->dev_private = NULL; 674 drm->dev_private = NULL;
465 dev_set_drvdata(dev, NULL); 675 dev_set_drvdata(dev, NULL);
466query_hw_fail:
467 clk_disable_unprepare(hwdev->mclk);
468 clk_disable_unprepare(hwdev->aclk);
469 clk_disable_unprepare(hwdev->pclk);
470 drm_dev_unref(drm); 676 drm_dev_unref(drm);
471alloc_fail: 677alloc_fail:
472 of_reserved_mem_device_release(dev); 678 of_reserved_mem_device_release(dev);
@@ -478,7 +684,6 @@ static void malidp_unbind(struct device *dev)
478{ 684{
479 struct drm_device *drm = dev_get_drvdata(dev); 685 struct drm_device *drm = dev_get_drvdata(dev);
480 struct malidp_drm *malidp = drm->dev_private; 686 struct malidp_drm *malidp = drm->dev_private;
481 struct malidp_hw_device *hwdev = malidp->dev;
482 687
483 drm_dev_unregister(drm); 688 drm_dev_unregister(drm);
484 if (malidp->fbdev) { 689 if (malidp->fbdev) {
@@ -486,18 +691,22 @@ static void malidp_unbind(struct device *dev)
486 malidp->fbdev = NULL; 691 malidp->fbdev = NULL;
487 } 692 }
488 drm_kms_helper_poll_fini(drm); 693 drm_kms_helper_poll_fini(drm);
694 pm_runtime_get_sync(dev);
695 drm_vblank_cleanup(drm);
489 malidp_se_irq_fini(drm); 696 malidp_se_irq_fini(drm);
490 malidp_de_irq_fini(drm); 697 malidp_de_irq_fini(drm);
491 drm_vblank_cleanup(drm);
492 component_unbind_all(dev, drm); 698 component_unbind_all(dev, drm);
493 of_node_put(malidp->crtc.port); 699 of_node_put(malidp->crtc.port);
494 malidp->crtc.port = NULL; 700 malidp->crtc.port = NULL;
701 malidp_fini_sysfs(dev);
495 malidp_fini(drm); 702 malidp_fini(drm);
703 pm_runtime_put(dev);
704 if (pm_runtime_enabled(dev))
705 pm_runtime_disable(dev);
706 else
707 malidp_runtime_pm_suspend(dev);
496 drm->dev_private = NULL; 708 drm->dev_private = NULL;
497 dev_set_drvdata(dev, NULL); 709 dev_set_drvdata(dev, NULL);
498 clk_disable_unprepare(hwdev->mclk);
499 clk_disable_unprepare(hwdev->aclk);
500 clk_disable_unprepare(hwdev->pclk);
501 drm_dev_unref(drm); 710 drm_dev_unref(drm);
502 of_reserved_mem_device_release(dev); 711 of_reserved_mem_device_release(dev);
503} 712}
@@ -516,30 +725,17 @@ static int malidp_compare_dev(struct device *dev, void *data)
516 725
517static int malidp_platform_probe(struct platform_device *pdev) 726static int malidp_platform_probe(struct platform_device *pdev)
518{ 727{
519 struct device_node *port, *ep; 728 struct device_node *port;
520 struct component_match *match = NULL; 729 struct component_match *match = NULL;
521 730
522 if (!pdev->dev.of_node) 731 if (!pdev->dev.of_node)
523 return -ENODEV; 732 return -ENODEV;
524 733
525 /* there is only one output port inside each device, find it */ 734 /* there is only one output port inside each device, find it */
526 ep = of_graph_get_next_endpoint(pdev->dev.of_node, NULL); 735 port = of_graph_get_remote_node(pdev->dev.of_node, 0, 0);
527 if (!ep) 736 if (!port)
528 return -ENODEV; 737 return -ENODEV;
529 738
530 if (!of_device_is_available(ep)) {
531 of_node_put(ep);
532 return -ENODEV;
533 }
534
535 /* add the remote encoder port as component */
536 port = of_graph_get_remote_port_parent(ep);
537 of_node_put(ep);
538 if (!port || !of_device_is_available(port)) {
539 of_node_put(port);
540 return -EAGAIN;
541 }
542
543 drm_of_component_match_add(&pdev->dev, &match, malidp_compare_dev, 739 drm_of_component_match_add(&pdev->dev, &match, malidp_compare_dev,
544 port); 740 port);
545 of_node_put(port); 741 of_node_put(port);
@@ -553,11 +749,52 @@ static int malidp_platform_remove(struct platform_device *pdev)
553 return 0; 749 return 0;
554} 750}
555 751
752static int __maybe_unused malidp_pm_suspend(struct device *dev)
753{
754 struct drm_device *drm = dev_get_drvdata(dev);
755 struct malidp_drm *malidp = drm->dev_private;
756
757 drm_kms_helper_poll_disable(drm);
758 console_lock();
759 drm_fbdev_cma_set_suspend(malidp->fbdev, 1);
760 console_unlock();
761 malidp->pm_state = drm_atomic_helper_suspend(drm);
762 if (IS_ERR(malidp->pm_state)) {
763 console_lock();
764 drm_fbdev_cma_set_suspend(malidp->fbdev, 0);
765 console_unlock();
766 drm_kms_helper_poll_enable(drm);
767 return PTR_ERR(malidp->pm_state);
768 }
769
770 return 0;
771}
772
773static int __maybe_unused malidp_pm_resume(struct device *dev)
774{
775 struct drm_device *drm = dev_get_drvdata(dev);
776 struct malidp_drm *malidp = drm->dev_private;
777
778 drm_atomic_helper_resume(drm, malidp->pm_state);
779 console_lock();
780 drm_fbdev_cma_set_suspend(malidp->fbdev, 0);
781 console_unlock();
782 drm_kms_helper_poll_enable(drm);
783
784 return 0;
785}
786
787static const struct dev_pm_ops malidp_pm_ops = {
 788 SET_SYSTEM_SLEEP_PM_OPS(malidp_pm_suspend, malidp_pm_resume)
789 SET_RUNTIME_PM_OPS(malidp_runtime_pm_suspend, malidp_runtime_pm_resume, NULL)
790};
791
556static struct platform_driver malidp_platform_driver = { 792static struct platform_driver malidp_platform_driver = {
557 .probe = malidp_platform_probe, 793 .probe = malidp_platform_probe,
558 .remove = malidp_platform_remove, 794 .remove = malidp_platform_remove,
559 .driver = { 795 .driver = {
560 .name = "mali-dp", 796 .name = "mali-dp",
797 .pm = &malidp_pm_ops,
561 .of_match_table = malidp_drm_of_match, 798 .of_match_table = malidp_drm_of_match,
562 }, 799 },
563}; 800};
diff --git a/drivers/gpu/drm/arm/malidp_drv.h b/drivers/gpu/drm/arm/malidp_drv.h
index dbc617c6e4ef..040311ffcaec 100644
--- a/drivers/gpu/drm/arm/malidp_drv.h
+++ b/drivers/gpu/drm/arm/malidp_drv.h
@@ -24,6 +24,8 @@ struct malidp_drm {
24 struct drm_crtc crtc; 24 struct drm_crtc crtc;
25 wait_queue_head_t wq; 25 wait_queue_head_t wq;
26 atomic_t config_valid; 26 atomic_t config_valid;
27 struct drm_atomic_state *pm_state;
28 u32 core_id;
27}; 29};
28 30
29#define crtc_to_malidp_device(x) container_of(x, struct malidp_drm, crtc) 31#define crtc_to_malidp_device(x) container_of(x, struct malidp_drm, crtc)
@@ -47,6 +49,17 @@ struct malidp_plane_state {
47#define to_malidp_plane(x) container_of(x, struct malidp_plane, base) 49#define to_malidp_plane(x) container_of(x, struct malidp_plane, base)
48#define to_malidp_plane_state(x) container_of(x, struct malidp_plane_state, base) 50#define to_malidp_plane_state(x) container_of(x, struct malidp_plane_state, base)
49 51
52struct malidp_crtc_state {
53 struct drm_crtc_state base;
54 u32 gamma_coeffs[MALIDP_COEFFTAB_NUM_COEFFS];
55 u32 coloradj_coeffs[MALIDP_COLORADJ_NUM_COEFFS];
56 struct malidp_se_config scaler_config;
57 /* Bitfield of all the planes that have requested a scaled output. */
58 u8 scaled_planes_mask;
59};
60
61#define to_malidp_crtc_state(x) container_of(x, struct malidp_crtc_state, base)
62
50int malidp_de_planes_init(struct drm_device *drm); 63int malidp_de_planes_init(struct drm_device *drm);
51void malidp_de_planes_destroy(struct drm_device *drm); 64void malidp_de_planes_destroy(struct drm_device *drm);
52int malidp_crtc_init(struct drm_device *drm); 65int malidp_crtc_init(struct drm_device *drm);
diff --git a/drivers/gpu/drm/arm/malidp_hw.c b/drivers/gpu/drm/arm/malidp_hw.c
index 9f5513006eee..28360b8542f7 100644
--- a/drivers/gpu/drm/arm/malidp_hw.c
+++ b/drivers/gpu/drm/arm/malidp_hw.c
@@ -12,6 +12,7 @@
12 * in an attempt to provide to the rest of the driver code a unified view 12 * in an attempt to provide to the rest of the driver code a unified view
13 */ 13 */
14 14
15#include <linux/clk.h>
15#include <linux/types.h> 16#include <linux/types.h>
16#include <linux/io.h> 17#include <linux/io.h>
17#include <drm/drmP.h> 18#include <drm/drmP.h>
@@ -86,6 +87,80 @@ static const struct malidp_layer malidp550_layers[] = {
86 { DE_SMART, MALIDP550_DE_LS_BASE, MALIDP550_DE_LS_PTR_BASE, MALIDP550_DE_LS_R1_STRIDE }, 87 { DE_SMART, MALIDP550_DE_LS_BASE, MALIDP550_DE_LS_PTR_BASE, MALIDP550_DE_LS_R1_STRIDE },
87}; 88};
88 89
90#define SE_N_SCALING_COEFFS 96
91static const u16 dp500_se_scaling_coeffs[][SE_N_SCALING_COEFFS] = {
92 [MALIDP_UPSCALING_COEFFS - 1] = {
93 0x0000, 0x0001, 0x0007, 0x0011, 0x001e, 0x002e, 0x003f, 0x0052,
94 0x0064, 0x0073, 0x007d, 0x0080, 0x007a, 0x006c, 0x0053, 0x002f,
95 0x0000, 0x3fc6, 0x3f83, 0x3f39, 0x3eea, 0x3e9b, 0x3e4f, 0x3e0a,
96 0x3dd4, 0x3db0, 0x3da2, 0x3db1, 0x3dde, 0x3e2f, 0x3ea5, 0x3f40,
97 0x0000, 0x00e5, 0x01ee, 0x0315, 0x0456, 0x05aa, 0x0709, 0x086c,
98 0x09c9, 0x0b15, 0x0c4a, 0x0d5d, 0x0e4a, 0x0f06, 0x0f91, 0x0fe5,
99 0x1000, 0x0fe5, 0x0f91, 0x0f06, 0x0e4a, 0x0d5d, 0x0c4a, 0x0b15,
100 0x09c9, 0x086c, 0x0709, 0x05aa, 0x0456, 0x0315, 0x01ee, 0x00e5,
101 0x0000, 0x3f40, 0x3ea5, 0x3e2f, 0x3dde, 0x3db1, 0x3da2, 0x3db0,
102 0x3dd4, 0x3e0a, 0x3e4f, 0x3e9b, 0x3eea, 0x3f39, 0x3f83, 0x3fc6,
103 0x0000, 0x002f, 0x0053, 0x006c, 0x007a, 0x0080, 0x007d, 0x0073,
104 0x0064, 0x0052, 0x003f, 0x002e, 0x001e, 0x0011, 0x0007, 0x0001
105 },
106 [MALIDP_DOWNSCALING_1_5_COEFFS - 1] = {
107 0x0059, 0x004f, 0x0041, 0x002e, 0x0016, 0x3ffb, 0x3fd9, 0x3fb4,
108 0x3f8c, 0x3f62, 0x3f36, 0x3f09, 0x3edd, 0x3eb3, 0x3e8d, 0x3e6c,
109 0x3e52, 0x3e3f, 0x3e35, 0x3e37, 0x3e46, 0x3e61, 0x3e8c, 0x3ec5,
110 0x3f0f, 0x3f68, 0x3fd1, 0x004a, 0x00d3, 0x0169, 0x020b, 0x02b8,
111 0x036e, 0x042d, 0x04f2, 0x05b9, 0x0681, 0x0745, 0x0803, 0x08ba,
112 0x0965, 0x0a03, 0x0a91, 0x0b0d, 0x0b75, 0x0bc6, 0x0c00, 0x0c20,
113 0x0c28, 0x0c20, 0x0c00, 0x0bc6, 0x0b75, 0x0b0d, 0x0a91, 0x0a03,
114 0x0965, 0x08ba, 0x0803, 0x0745, 0x0681, 0x05b9, 0x04f2, 0x042d,
115 0x036e, 0x02b8, 0x020b, 0x0169, 0x00d3, 0x004a, 0x3fd1, 0x3f68,
116 0x3f0f, 0x3ec5, 0x3e8c, 0x3e61, 0x3e46, 0x3e37, 0x3e35, 0x3e3f,
117 0x3e52, 0x3e6c, 0x3e8d, 0x3eb3, 0x3edd, 0x3f09, 0x3f36, 0x3f62,
118 0x3f8c, 0x3fb4, 0x3fd9, 0x3ffb, 0x0016, 0x002e, 0x0041, 0x004f
119 },
120 [MALIDP_DOWNSCALING_2_COEFFS - 1] = {
121 0x3f19, 0x3f03, 0x3ef0, 0x3edf, 0x3ed0, 0x3ec5, 0x3ebd, 0x3eb9,
122 0x3eb9, 0x3ebf, 0x3eca, 0x3ed9, 0x3eef, 0x3f0a, 0x3f2c, 0x3f52,
123 0x3f7f, 0x3fb0, 0x3fe8, 0x0026, 0x006a, 0x00b4, 0x0103, 0x0158,
124 0x01b1, 0x020d, 0x026c, 0x02cd, 0x032f, 0x0392, 0x03f4, 0x0455,
125 0x04b4, 0x051e, 0x0585, 0x05eb, 0x064c, 0x06a8, 0x06fe, 0x074e,
126 0x0796, 0x07d5, 0x080c, 0x0839, 0x085c, 0x0875, 0x0882, 0x0887,
127 0x0881, 0x0887, 0x0882, 0x0875, 0x085c, 0x0839, 0x080c, 0x07d5,
128 0x0796, 0x074e, 0x06fe, 0x06a8, 0x064c, 0x05eb, 0x0585, 0x051e,
129 0x04b4, 0x0455, 0x03f4, 0x0392, 0x032f, 0x02cd, 0x026c, 0x020d,
130 0x01b1, 0x0158, 0x0103, 0x00b4, 0x006a, 0x0026, 0x3fe8, 0x3fb0,
131 0x3f7f, 0x3f52, 0x3f2c, 0x3f0a, 0x3eef, 0x3ed9, 0x3eca, 0x3ebf,
132 0x3eb9, 0x3eb9, 0x3ebd, 0x3ec5, 0x3ed0, 0x3edf, 0x3ef0, 0x3f03
133 },
134 [MALIDP_DOWNSCALING_2_75_COEFFS - 1] = {
135 0x3f51, 0x3f60, 0x3f71, 0x3f84, 0x3f98, 0x3faf, 0x3fc8, 0x3fe3,
136 0x0000, 0x001f, 0x0040, 0x0064, 0x008a, 0x00b1, 0x00da, 0x0106,
137 0x0133, 0x0160, 0x018e, 0x01bd, 0x01ec, 0x021d, 0x024e, 0x0280,
138 0x02b2, 0x02e4, 0x0317, 0x0349, 0x037c, 0x03ad, 0x03df, 0x0410,
139 0x0440, 0x0468, 0x048f, 0x04b3, 0x04d6, 0x04f8, 0x0516, 0x0533,
140 0x054e, 0x0566, 0x057c, 0x0590, 0x05a0, 0x05ae, 0x05ba, 0x05c3,
141 0x05c9, 0x05c3, 0x05ba, 0x05ae, 0x05a0, 0x0590, 0x057c, 0x0566,
142 0x054e, 0x0533, 0x0516, 0x04f8, 0x04d6, 0x04b3, 0x048f, 0x0468,
143 0x0440, 0x0410, 0x03df, 0x03ad, 0x037c, 0x0349, 0x0317, 0x02e4,
144 0x02b2, 0x0280, 0x024e, 0x021d, 0x01ec, 0x01bd, 0x018e, 0x0160,
145 0x0133, 0x0106, 0x00da, 0x00b1, 0x008a, 0x0064, 0x0040, 0x001f,
146 0x0000, 0x3fe3, 0x3fc8, 0x3faf, 0x3f98, 0x3f84, 0x3f71, 0x3f60
147 },
148 [MALIDP_DOWNSCALING_4_COEFFS - 1] = {
149 0x0094, 0x00a9, 0x00be, 0x00d4, 0x00ea, 0x0101, 0x0118, 0x012f,
150 0x0148, 0x0160, 0x017a, 0x0193, 0x01ae, 0x01c8, 0x01e4, 0x01ff,
151 0x021c, 0x0233, 0x024a, 0x0261, 0x0278, 0x028f, 0x02a6, 0x02bd,
152 0x02d4, 0x02eb, 0x0302, 0x0319, 0x032f, 0x0346, 0x035d, 0x0374,
153 0x038a, 0x0397, 0x03a3, 0x03af, 0x03bb, 0x03c6, 0x03d1, 0x03db,
154 0x03e4, 0x03ed, 0x03f6, 0x03fe, 0x0406, 0x040d, 0x0414, 0x041a,
155 0x0420, 0x041a, 0x0414, 0x040d, 0x0406, 0x03fe, 0x03f6, 0x03ed,
156 0x03e4, 0x03db, 0x03d1, 0x03c6, 0x03bb, 0x03af, 0x03a3, 0x0397,
157 0x038a, 0x0374, 0x035d, 0x0346, 0x032f, 0x0319, 0x0302, 0x02eb,
158 0x02d4, 0x02bd, 0x02a6, 0x028f, 0x0278, 0x0261, 0x024a, 0x0233,
159 0x021c, 0x01ff, 0x01e4, 0x01c8, 0x01ae, 0x0193, 0x017a, 0x0160,
160 0x0148, 0x012f, 0x0118, 0x0101, 0x00ea, 0x00d4, 0x00be, 0x00a9
161 },
162};
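
The rows above are indexed by malidp_scaling_coeff_set minus one (the enum,
added to malidp_hw.h further down, starts at 1, presumably so that 0 can mean
"no coefficient set"). A minimal illustration of the lookup performed by
malidp500_se_write_pp_coefftab():

	/* the 96 coefficients used for a 2x downscale */
	const u16 *tab = dp500_se_scaling_coeffs[MALIDP_DOWNSCALING_2_COEFFS - 1];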
163
89#define MALIDP_DE_DEFAULT_PREFETCH_START 5 164#define MALIDP_DE_DEFAULT_PREFETCH_START 5
90 165
91static int malidp500_query_hw(struct malidp_hw_device *hwdev) 166static int malidp500_query_hw(struct malidp_hw_device *hwdev)
@@ -211,6 +286,88 @@ static int malidp500_rotmem_required(struct malidp_hw_device *hwdev, u16 w, u16
211 return w * drm_format_plane_cpp(fmt, 0) * 8; 286 return w * drm_format_plane_cpp(fmt, 0) * 8;
212} 287}
213 288
289static void malidp500_se_write_pp_coefftab(struct malidp_hw_device *hwdev,
290 u32 direction,
291 u16 addr,
292 u8 coeffs_id)
293{
294 int i;
295 u16 scaling_control = MALIDP500_SE_CONTROL + MALIDP_SE_SCALING_CONTROL;
296
297 malidp_hw_write(hwdev,
298 direction | (addr & MALIDP_SE_COEFFTAB_ADDR_MASK),
299 scaling_control + MALIDP_SE_COEFFTAB_ADDR);
 300 for (i = 0; i < SE_N_SCALING_COEFFS; ++i)
301 malidp_hw_write(hwdev, MALIDP_SE_SET_COEFFTAB_DATA(
302 dp500_se_scaling_coeffs[coeffs_id][i]),
303 scaling_control + MALIDP_SE_COEFFTAB_DATA);
304}
305
306static int malidp500_se_set_scaling_coeffs(struct malidp_hw_device *hwdev,
307 struct malidp_se_config *se_config,
308 struct malidp_se_config *old_config)
309{
310 /* Get array indices into dp500_se_scaling_coeffs. */
311 u8 h = (u8)se_config->hcoeff - 1;
312 u8 v = (u8)se_config->vcoeff - 1;
313
314 if (WARN_ON(h >= ARRAY_SIZE(dp500_se_scaling_coeffs) ||
315 v >= ARRAY_SIZE(dp500_se_scaling_coeffs)))
316 return -EINVAL;
317
318 if ((h == v) && (se_config->hcoeff != old_config->hcoeff ||
319 se_config->vcoeff != old_config->vcoeff)) {
320 malidp500_se_write_pp_coefftab(hwdev,
321 (MALIDP_SE_V_COEFFTAB |
322 MALIDP_SE_H_COEFFTAB),
323 0, v);
324 } else {
325 if (se_config->vcoeff != old_config->vcoeff)
326 malidp500_se_write_pp_coefftab(hwdev,
327 MALIDP_SE_V_COEFFTAB,
328 0, v);
329 if (se_config->hcoeff != old_config->hcoeff)
330 malidp500_se_write_pp_coefftab(hwdev,
331 MALIDP_SE_H_COEFFTAB,
332 0, h);
333 }
334
335 return 0;
336}
337
338static long malidp500_se_calc_mclk(struct malidp_hw_device *hwdev,
339 struct malidp_se_config *se_config,
340 struct videomode *vm)
341{
342 unsigned long mclk;
343 unsigned long pxlclk = vm->pixelclock; /* Hz */
344 unsigned long htotal = vm->hactive + vm->hfront_porch +
345 vm->hback_porch + vm->hsync_len;
346 unsigned long input_size = se_config->input_w * se_config->input_h;
347 unsigned long a = 10;
348 long ret;
349
350 /*
351 * mclk = max(a, 1.5) * pxlclk
352 *
 353 * To avoid floating-point arithmetic, a is computed scaled by 10:
 354 * 15 stands in for 1.5 and the final product is divided by 10.
355 */
356 if (se_config->scale_enable) {
357 a = 15 * input_size / (htotal * se_config->output_h);
358 if (a < 15)
359 a = 15;
360 }
361 mclk = a * pxlclk / 10;
362 ret = clk_get_rate(hwdev->mclk);
363 if (ret < mclk) {
364 DRM_DEBUG_DRIVER("mclk requirement of %lu kHz can't be met.\n",
365 mclk / 1000);
366 return -EINVAL;
367 }
368 return ret;
369}
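
The 15/10 pair above encodes the 1.5x bandwidth floor in tenths. A worked run,
assuming a 1280x720 source scaled onto a 1920x1080 mode with htotal = 2200 and
pxlclk = 148500000 Hz (illustrative numbers):

	/* a = 15 * (1280 * 720) / (2200 * 1080) = 5, clamped up to 15  */
	/* mclk = 15 * 148500000 / 10 = 222750000 Hz, i.e. 1.5 * pxlclk */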
370
214static int malidp550_query_hw(struct malidp_hw_device *hwdev) 371static int malidp550_query_hw(struct malidp_hw_device *hwdev)
215{ 372{
216 u32 conf = malidp_hw_read(hwdev, MALIDP550_CONFIG_ID); 373 u32 conf = malidp_hw_read(hwdev, MALIDP550_CONFIG_ID);
@@ -384,6 +541,53 @@ static int malidp550_rotmem_required(struct malidp_hw_device *hwdev, u16 w, u16
384 return w * bytes_per_col; 541 return w * bytes_per_col;
385} 542}
386 543
544static int malidp550_se_set_scaling_coeffs(struct malidp_hw_device *hwdev,
545 struct malidp_se_config *se_config,
546 struct malidp_se_config *old_config)
547{
548 u32 mask = MALIDP550_SE_CTL_VCSEL(MALIDP550_SE_CTL_SEL_MASK) |
549 MALIDP550_SE_CTL_HCSEL(MALIDP550_SE_CTL_SEL_MASK);
550 u32 new_value = MALIDP550_SE_CTL_VCSEL(se_config->vcoeff) |
551 MALIDP550_SE_CTL_HCSEL(se_config->hcoeff);
552
553 malidp_hw_clearbits(hwdev, mask, MALIDP550_SE_CONTROL);
554 malidp_hw_setbits(hwdev, new_value, MALIDP550_SE_CONTROL);
555 return 0;
556}
557
558static long malidp550_se_calc_mclk(struct malidp_hw_device *hwdev,
559 struct malidp_se_config *se_config,
560 struct videomode *vm)
561{
562 unsigned long mclk;
563 unsigned long pxlclk = vm->pixelclock;
564 unsigned long htotal = vm->hactive + vm->hfront_porch +
565 vm->hback_porch + vm->hsync_len;
566 unsigned long numerator = 1, denominator = 1;
567 long ret;
568
569 if (se_config->scale_enable) {
570 numerator = max(se_config->input_w, se_config->output_w) *
571 se_config->input_h;
572 numerator += se_config->output_w *
573 (se_config->output_h -
574 min(se_config->input_h, se_config->output_h));
575 denominator = (htotal - 2) * se_config->output_h;
576 }
577
578 /* mclk can't be slower than pxlclk. */
579 if (numerator < denominator)
580 numerator = denominator = 1;
581 mclk = (pxlclk * numerator) / denominator;
582 ret = clk_get_rate(hwdev->mclk);
583 if (ret < mclk) {
584 DRM_DEBUG_DRIVER("mclk requirement of %lu kHz can't be met.\n",
585 mclk / 1000);
586 return -EINVAL;
587 }
588 return ret;
589}
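
A worked run of the DP550 estimate above, downscaling a 3840x2160 source onto
a 1920x1080 mode with htotal = 2200 (illustrative numbers):

	/* numerator   = max(3840, 1920) * 2160 + 1920 * (1080 - 1080) = 8294400 */
	/* denominator = (2200 - 2) * 1080 = 2373840                             */
	/* mclk = pxlclk * 8294400 / 2373840, roughly 3.49 * pxlclk; ratios      */
	/* below 1 collapse to 1/1, so mclk never drops under pxlclk             */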
590
387static int malidp650_query_hw(struct malidp_hw_device *hwdev) 591static int malidp650_query_hw(struct malidp_hw_device *hwdev)
388{ 592{
389 u32 conf = malidp_hw_read(hwdev, MALIDP550_CONFIG_ID); 593 u32 conf = malidp_hw_read(hwdev, MALIDP550_CONFIG_ID);
@@ -415,6 +619,7 @@ static int malidp650_query_hw(struct malidp_hw_device *hwdev)
415const struct malidp_hw_device malidp_device[MALIDP_MAX_DEVICES] = { 619const struct malidp_hw_device malidp_device[MALIDP_MAX_DEVICES] = {
416 [MALIDP_500] = { 620 [MALIDP_500] = {
417 .map = { 621 .map = {
622 .coeffs_base = MALIDP500_COEFFS_BASE,
418 .se_base = MALIDP500_SE_BASE, 623 .se_base = MALIDP500_SE_BASE,
419 .dc_base = MALIDP500_DC_BASE, 624 .dc_base = MALIDP500_DC_BASE,
420 .out_depth_base = MALIDP500_OUTPUT_DEPTH, 625 .out_depth_base = MALIDP500_OUTPUT_DEPTH,
@@ -447,10 +652,13 @@ const struct malidp_hw_device malidp_device[MALIDP_MAX_DEVICES] = {
447 .set_config_valid = malidp500_set_config_valid, 652 .set_config_valid = malidp500_set_config_valid,
448 .modeset = malidp500_modeset, 653 .modeset = malidp500_modeset,
449 .rotmem_required = malidp500_rotmem_required, 654 .rotmem_required = malidp500_rotmem_required,
655 .se_set_scaling_coeffs = malidp500_se_set_scaling_coeffs,
656 .se_calc_mclk = malidp500_se_calc_mclk,
450 .features = MALIDP_DEVICE_LV_HAS_3_STRIDES, 657 .features = MALIDP_DEVICE_LV_HAS_3_STRIDES,
451 }, 658 },
452 [MALIDP_550] = { 659 [MALIDP_550] = {
453 .map = { 660 .map = {
661 .coeffs_base = MALIDP550_COEFFS_BASE,
454 .se_base = MALIDP550_SE_BASE, 662 .se_base = MALIDP550_SE_BASE,
455 .dc_base = MALIDP550_DC_BASE, 663 .dc_base = MALIDP550_DC_BASE,
456 .out_depth_base = MALIDP550_DE_OUTPUT_DEPTH, 664 .out_depth_base = MALIDP550_DE_OUTPUT_DEPTH,
@@ -481,10 +689,13 @@ const struct malidp_hw_device malidp_device[MALIDP_MAX_DEVICES] = {
481 .set_config_valid = malidp550_set_config_valid, 689 .set_config_valid = malidp550_set_config_valid,
482 .modeset = malidp550_modeset, 690 .modeset = malidp550_modeset,
483 .rotmem_required = malidp550_rotmem_required, 691 .rotmem_required = malidp550_rotmem_required,
692 .se_set_scaling_coeffs = malidp550_se_set_scaling_coeffs,
693 .se_calc_mclk = malidp550_se_calc_mclk,
484 .features = 0, 694 .features = 0,
485 }, 695 },
486 [MALIDP_650] = { 696 [MALIDP_650] = {
487 .map = { 697 .map = {
698 .coeffs_base = MALIDP550_COEFFS_BASE,
488 .se_base = MALIDP550_SE_BASE, 699 .se_base = MALIDP550_SE_BASE,
489 .dc_base = MALIDP550_DC_BASE, 700 .dc_base = MALIDP550_DC_BASE,
490 .out_depth_base = MALIDP550_DE_OUTPUT_DEPTH, 701 .out_depth_base = MALIDP550_DE_OUTPUT_DEPTH,
@@ -516,6 +727,8 @@ const struct malidp_hw_device malidp_device[MALIDP_MAX_DEVICES] = {
516 .set_config_valid = malidp550_set_config_valid, 727 .set_config_valid = malidp550_set_config_valid,
517 .modeset = malidp550_modeset, 728 .modeset = malidp550_modeset,
518 .rotmem_required = malidp550_rotmem_required, 729 .rotmem_required = malidp550_rotmem_required,
730 .se_set_scaling_coeffs = malidp550_se_set_scaling_coeffs,
731 .se_calc_mclk = malidp550_se_calc_mclk,
519 .features = 0, 732 .features = 0,
520 }, 733 },
521}; 734};
diff --git a/drivers/gpu/drm/arm/malidp_hw.h b/drivers/gpu/drm/arm/malidp_hw.h
index 00974b59407d..849ad9a30c3a 100644
--- a/drivers/gpu/drm/arm/malidp_hw.h
+++ b/drivers/gpu/drm/arm/malidp_hw.h
@@ -61,12 +61,34 @@ struct malidp_layer {
61 u16 stride_offset; /* Offset to the first stride register. */ 61 u16 stride_offset; /* Offset to the first stride register. */
62}; 62};
63 63
64enum malidp_scaling_coeff_set {
65 MALIDP_UPSCALING_COEFFS = 1,
66 MALIDP_DOWNSCALING_1_5_COEFFS = 2,
67 MALIDP_DOWNSCALING_2_COEFFS = 3,
68 MALIDP_DOWNSCALING_2_75_COEFFS = 4,
69 MALIDP_DOWNSCALING_4_COEFFS = 5,
70};
71
72struct malidp_se_config {
73 u8 scale_enable : 1;
74 u8 enhancer_enable : 1;
75 u8 hcoeff : 3;
76 u8 vcoeff : 3;
77 u8 plane_src_id;
78 u16 input_w, input_h;
79 u16 output_w, output_h;
80 u32 h_init_phase, h_delta_phase;
81 u32 v_init_phase, v_delta_phase;
82};
83
64/* regmap features */ 84/* regmap features */
65#define MALIDP_REGMAP_HAS_CLEARIRQ (1 << 0) 85#define MALIDP_REGMAP_HAS_CLEARIRQ (1 << 0)
66 86
67struct malidp_hw_regmap { 87struct malidp_hw_regmap {
68 /* address offset of the DE register bank */ 88 /* address offset of the DE register bank */
69 /* is always 0x0000 */ 89 /* is always 0x0000 */
90 /* address offset of the DE coefficients registers */
91 const u16 coeffs_base;
70 /* address offset of the SE registers bank */ 92 /* address offset of the SE registers bank */
71 const u16 se_base; 93 const u16 se_base;
72 /* address offset of the DC registers bank */ 94 /* address offset of the DC registers bank */
@@ -151,11 +173,22 @@ struct malidp_hw_device {
151 */ 173 */
152 int (*rotmem_required)(struct malidp_hw_device *hwdev, u16 w, u16 h, u32 fmt); 174 int (*rotmem_required)(struct malidp_hw_device *hwdev, u16 w, u16 h, u32 fmt);
153 175
176 int (*se_set_scaling_coeffs)(struct malidp_hw_device *hwdev,
177 struct malidp_se_config *se_config,
178 struct malidp_se_config *old_config);
179
180 long (*se_calc_mclk)(struct malidp_hw_device *hwdev,
181 struct malidp_se_config *se_config,
182 struct videomode *vm);
183
154 u8 features; 184 u8 features;
155 185
156 u8 min_line_size; 186 u8 min_line_size;
157 u16 max_line_size; 187 u16 max_line_size;
158 188
189 /* track the device PM state */
190 bool pm_suspended;
191
159 /* size of memory used for rotating layers, up to two banks available */ 192 /* size of memory used for rotating layers, up to two banks available */
160 u32 rotation_memory[2]; 193 u32 rotation_memory[2];
161}; 194};
@@ -173,12 +206,14 @@ extern const struct malidp_hw_device malidp_device[MALIDP_MAX_DEVICES];
173 206
174static inline u32 malidp_hw_read(struct malidp_hw_device *hwdev, u32 reg) 207static inline u32 malidp_hw_read(struct malidp_hw_device *hwdev, u32 reg)
175{ 208{
209 WARN_ON(hwdev->pm_suspended);
176 return readl(hwdev->regs + reg); 210 return readl(hwdev->regs + reg);
177} 211}
178 212
179static inline void malidp_hw_write(struct malidp_hw_device *hwdev, 213static inline void malidp_hw_write(struct malidp_hw_device *hwdev,
180 u32 value, u32 reg) 214 u32 value, u32 reg)
181{ 215{
216 WARN_ON(hwdev->pm_suspended);
182 writel(value, hwdev->regs + reg); 217 writel(value, hwdev->regs + reg);
183} 218}
184 219
@@ -243,6 +278,47 @@ static inline bool malidp_hw_pitch_valid(struct malidp_hw_device *hwdev,
243 return !(pitch & (hwdev->map.bus_align_bytes - 1)); 278 return !(pitch & (hwdev->map.bus_align_bytes - 1));
244} 279}
245 280
281/* U16.16 */
282#define FP_1_00000 0x00010000 /* 1.0 */
283#define FP_0_66667 0x0000AAAA /* 0.6667 = 1/1.5 */
284#define FP_0_50000 0x00008000 /* 0.5 = 1/2 */
285#define FP_0_36363 0x00005D17 /* 0.36363 = 1/2.75 */
286#define FP_0_25000 0x00004000 /* 0.25 = 1/4 */
287
288static inline enum malidp_scaling_coeff_set
289malidp_se_select_coeffs(u32 upscale_factor)
290{
291 return (upscale_factor >= FP_1_00000) ? MALIDP_UPSCALING_COEFFS :
292 (upscale_factor >= FP_0_66667) ? MALIDP_DOWNSCALING_1_5_COEFFS :
293 (upscale_factor >= FP_0_50000) ? MALIDP_DOWNSCALING_2_COEFFS :
294 (upscale_factor >= FP_0_36363) ? MALIDP_DOWNSCALING_2_75_COEFFS :
295 MALIDP_DOWNSCALING_4_COEFFS;
296}
297
298#undef FP_0_25000
299#undef FP_0_36363
300#undef FP_0_50000
301#undef FP_0_66667
302#undef FP_1_00000
303
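
A worked example may help here: the factor compared against these U16.16 cut-offs is the output/input size ratio in 16.16 fixed point, so each threshold is simply the reciprocal of the coefficient set's nominal downscaling ratio. A standalone sketch (hypothetical sizes; the constants mirror the FP_* values above) showing the selection for a 1920-wide source squeezed into an 800-pixel window:

#include <stdio.h>
#include <stdint.h>

/* Mirrors of the FP_* constants and the selection chain above. */
enum coeff_set { UP = 1, DOWN_1_5, DOWN_2, DOWN_2_75, DOWN_4 };

static enum coeff_set select_coeffs(uint32_t factor)
{
	return (factor >= 0x00010000) ? UP :		/* >= 1.0    */
	       (factor >= 0x0000AAAA) ? DOWN_1_5 :	/* >= 1/1.5  */
	       (factor >= 0x00008000) ? DOWN_2 :	/* >= 1/2    */
	       (factor >= 0x00005D17) ? DOWN_2_75 :	/* >= 1/2.75 */
	       DOWN_4;
}

int main(void)
{
	/* Hypothetical: 1920-wide source scaled into an 800-wide window. */
	uint32_t factor = ((uint32_t)800 << 16) / 1920;	/* ~0.4167 in U16.16 */

	/* 0.4167 sits between 1/2.75 and 1/2, so DOWN_2_75 is picked. */
	printf("factor=0x%08x set=%d\n", factor, select_coeffs(factor));
	return 0;
}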
304static inline void malidp_se_set_enh_coeffs(struct malidp_hw_device *hwdev)
305{
306 static const s32 enhancer_coeffs[] = {
307 -8, -8, -8, -8, 128, -8, -8, -8, -8
308 };
309 u32 val = MALIDP_SE_SET_ENH_LIMIT_LOW(MALIDP_SE_ENH_LOW_LEVEL) |
310 MALIDP_SE_SET_ENH_LIMIT_HIGH(MALIDP_SE_ENH_HIGH_LEVEL);
311 u32 image_enh = hwdev->map.se_base +
312 ((hwdev->map.features & MALIDP_REGMAP_HAS_CLEARIRQ) ?
313 0x10 : 0xC) + MALIDP_SE_IMAGE_ENH;
314 u32 enh_coeffs = image_enh + MALIDP_SE_ENH_COEFF0;
315 int i;
316
317 malidp_hw_write(hwdev, val, image_enh);
318 for (i = 0; i < ARRAY_SIZE(enhancer_coeffs); ++i)
319 malidp_hw_write(hwdev, enhancer_coeffs[i], enh_coeffs + i * 4);
320}
321
246/* 322/*
247 * background color components are defined as 12bits values, 323 * background color components are defined as 12bits values,
248 * they will be shifted right when stored on hardware that 324 * they will be shifted right when stored on hardware that
@@ -252,4 +328,9 @@ static inline bool malidp_hw_pitch_valid(struct malidp_hw_device *hwdev,
252#define MALIDP_BGND_COLOR_G 0x000 328#define MALIDP_BGND_COLOR_G 0x000
253#define MALIDP_BGND_COLOR_B 0x000 329#define MALIDP_BGND_COLOR_B 0x000
254 330
331#define MALIDP_COLORADJ_NUM_COEFFS 12
332#define MALIDP_COEFFTAB_NUM_COEFFS 64
333
334#define MALIDP_GAMMA_LUT_SIZE 4096
335
255#endif /* __MALIDP_HW_H__ */ 336#endif /* __MALIDP_HW_H__ */
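
The pm_suspended flag and the WARN_ON() guards added to malidp_hw_read()/malidp_hw_write() above turn a register access on a runtime-suspended device into a loud, backtraced warning instead of a silent bus stall. A minimal userspace analogue of the pattern, for illustration only:

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

struct fake_hwdev {
	bool pm_suspended;	/* tracks the device PM state */
	uint32_t regs[16];
};

static uint32_t hw_read(struct fake_hwdev *d, unsigned int reg)
{
	if (d->pm_suspended)	/* stands in for WARN_ON() */
		fprintf(stderr, "warn: register %u read while suspended\n", reg);
	return d->regs[reg];
}

int main(void)
{
	struct fake_hwdev d = { .pm_suspended = true };

	hw_read(&d, 3);		/* trips the warning path */
	return 0;
}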
diff --git a/drivers/gpu/drm/arm/malidp_planes.c b/drivers/gpu/drm/arm/malidp_planes.c
index d5aec082294c..814fda23cead 100644
--- a/drivers/gpu/drm/arm/malidp_planes.c
+++ b/drivers/gpu/drm/arm/malidp_planes.c
@@ -16,6 +16,7 @@
16#include <drm/drm_fb_cma_helper.h> 16#include <drm/drm_fb_cma_helper.h>
17#include <drm/drm_gem_cma_helper.h> 17#include <drm/drm_gem_cma_helper.h>
18#include <drm/drm_plane_helper.h> 18#include <drm/drm_plane_helper.h>
19#include <drm/drm_print.h>
19 20
20#include "malidp_hw.h" 21#include "malidp_hw.h"
21#include "malidp_drv.h" 22#include "malidp_drv.h"
@@ -24,6 +25,9 @@
24#define MALIDP_LAYER_FORMAT 0x000 25#define MALIDP_LAYER_FORMAT 0x000
25#define MALIDP_LAYER_CONTROL 0x004 26#define MALIDP_LAYER_CONTROL 0x004
26#define LAYER_ENABLE (1 << 0) 27#define LAYER_ENABLE (1 << 0)
28#define LAYER_FLOWCFG_MASK 7
29#define LAYER_FLOWCFG(x) (((x) & LAYER_FLOWCFG_MASK) << 1)
30#define LAYER_FLOWCFG_SCALE_SE 3
27#define LAYER_ROT_OFFSET 8 31#define LAYER_ROT_OFFSET 8
28#define LAYER_H_FLIP (1 << 10) 32#define LAYER_H_FLIP (1 << 10)
29#define LAYER_V_FLIP (1 << 11) 33#define LAYER_V_FLIP (1 << 11)
@@ -60,6 +64,27 @@ static void malidp_de_plane_destroy(struct drm_plane *plane)
60 devm_kfree(plane->dev->dev, mp); 64 devm_kfree(plane->dev->dev, mp);
61} 65}
62 66
67/*
68 * Replicate what the default ->reset hook does: free the state pointer and
69 * allocate a new empty object. We just need enough space to store
70 * a malidp_plane_state instead of a drm_plane_state.
71 */
72static void malidp_plane_reset(struct drm_plane *plane)
73{
74 struct malidp_plane_state *state = to_malidp_plane_state(plane->state);
75
76 if (state)
77 __drm_atomic_helper_plane_destroy_state(&state->base);
78 kfree(state);
79 plane->state = NULL;
80 state = kzalloc(sizeof(*state), GFP_KERNEL);
81 if (state) {
82 state->base.plane = plane;
83 state->base.rotation = DRM_ROTATE_0;
84 plane->state = &state->base;
85 }
86}
87
63static struct 88static struct
64drm_plane_state *malidp_duplicate_plane_state(struct drm_plane *plane) 89drm_plane_state *malidp_duplicate_plane_state(struct drm_plane *plane)
65{ 90{
@@ -90,26 +115,71 @@ static void malidp_destroy_plane_state(struct drm_plane *plane,
90 kfree(m_state); 115 kfree(m_state);
91} 116}
92 117
118static void malidp_plane_atomic_print_state(struct drm_printer *p,
119 const struct drm_plane_state *state)
120{
121 struct malidp_plane_state *ms = to_malidp_plane_state(state);
122
123 drm_printf(p, "\trotmem_size=%u\n", ms->rotmem_size);
124 drm_printf(p, "\tformat_id=%u\n", ms->format);
125 drm_printf(p, "\tn_planes=%u\n", ms->n_planes);
126}
127
93static const struct drm_plane_funcs malidp_de_plane_funcs = { 128static const struct drm_plane_funcs malidp_de_plane_funcs = {
94 .update_plane = drm_atomic_helper_update_plane, 129 .update_plane = drm_atomic_helper_update_plane,
95 .disable_plane = drm_atomic_helper_disable_plane, 130 .disable_plane = drm_atomic_helper_disable_plane,
96 .set_property = drm_atomic_helper_plane_set_property, 131 .set_property = drm_atomic_helper_plane_set_property,
97 .destroy = malidp_de_plane_destroy, 132 .destroy = malidp_de_plane_destroy,
98 .reset = drm_atomic_helper_plane_reset, 133 .reset = malidp_plane_reset,
99 .atomic_duplicate_state = malidp_duplicate_plane_state, 134 .atomic_duplicate_state = malidp_duplicate_plane_state,
100 .atomic_destroy_state = malidp_destroy_plane_state, 135 .atomic_destroy_state = malidp_destroy_plane_state,
136 .atomic_print_state = malidp_plane_atomic_print_state,
101}; 137};
102 138
139static int malidp_se_check_scaling(struct malidp_plane *mp,
140 struct drm_plane_state *state)
141{
142 struct drm_crtc_state *crtc_state =
143 drm_atomic_get_existing_crtc_state(state->state, state->crtc);
144 struct malidp_crtc_state *mc;
145 struct drm_rect clip = { 0 };
146 u32 src_w, src_h;
147 int ret;
148
149 if (!crtc_state)
150 return -EINVAL;
151
152	mc = to_malidp_crtc_state(crtc_state);
153
154	clip.x2 = crtc_state->adjusted_mode.hdisplay;
155	clip.y2 = crtc_state->adjusted_mode.vdisplay;
156	ret = drm_plane_helper_check_state(state, &clip, 0, INT_MAX, true, true);
157	if (ret)
158		return ret;
159
160	src_w = state->src_w >> 16;
161	src_h = state->src_h >> 16;
162	if ((state->crtc_w == src_w) && (state->crtc_h == src_h)) {
163		/* Scaling not necessary for this plane. */
164		mc->scaled_planes_mask &= ~(mp->layer->id);
165		return 0;
166	}
167
168	if (mp->layer->id & (DE_SMART | DE_GRAPHICS2))
169		return -EINVAL;
170
171	mc->scaled_planes_mask |= mp->layer->id;
172 /* Defer scaling requirements calculation to the crtc check. */
173 return 0;
174}
175
103static int malidp_de_plane_check(struct drm_plane *plane, 176static int malidp_de_plane_check(struct drm_plane *plane,
104 struct drm_plane_state *state) 177 struct drm_plane_state *state)
105{ 178{
106 struct malidp_plane *mp = to_malidp_plane(plane); 179 struct malidp_plane *mp = to_malidp_plane(plane);
107 struct malidp_plane_state *ms = to_malidp_plane_state(state); 180 struct malidp_plane_state *ms = to_malidp_plane_state(state);
108 struct drm_crtc_state *crtc_state;
109 struct drm_framebuffer *fb; 181 struct drm_framebuffer *fb;
110 struct drm_rect clip = { 0 };
111 int i, ret; 182 int i, ret;
112 u32 src_w, src_h;
113 183
114 if (!state->crtc || !state->fb) 184 if (!state->crtc || !state->fb)
115 return 0; 185 return 0;
@@ -130,9 +200,6 @@ static int malidp_de_plane_check(struct drm_plane *plane,
130 } 200 }
131 } 201 }
132 202
133 src_w = state->src_w >> 16;
134 src_h = state->src_h >> 16;
135
136 if ((state->crtc_w > mp->hwdev->max_line_size) || 203 if ((state->crtc_w > mp->hwdev->max_line_size) ||
137 (state->crtc_h > mp->hwdev->max_line_size) || 204 (state->crtc_h > mp->hwdev->max_line_size) ||
138 (state->crtc_w < mp->hwdev->min_line_size) || 205 (state->crtc_w < mp->hwdev->min_line_size) ||
@@ -149,22 +216,16 @@ static int malidp_de_plane_check(struct drm_plane *plane,
149 (state->fb->pitches[1] != state->fb->pitches[2])) 216 (state->fb->pitches[1] != state->fb->pitches[2]))
150 return -EINVAL; 217 return -EINVAL;
151 218
219 ret = malidp_se_check_scaling(mp, state);
220 if (ret)
221 return ret;
222
152 /* packed RGB888 / BGR888 can't be rotated or flipped */ 223 /* packed RGB888 / BGR888 can't be rotated or flipped */
153 if (state->rotation != DRM_ROTATE_0 && 224 if (state->rotation != DRM_ROTATE_0 &&
154 (fb->format->format == DRM_FORMAT_RGB888 || 225 (fb->format->format == DRM_FORMAT_RGB888 ||
155 fb->format->format == DRM_FORMAT_BGR888)) 226 fb->format->format == DRM_FORMAT_BGR888))
156 return -EINVAL; 227 return -EINVAL;
157 228
158 crtc_state = drm_atomic_get_existing_crtc_state(state->state, state->crtc);
159 clip.x2 = crtc_state->adjusted_mode.hdisplay;
160 clip.y2 = crtc_state->adjusted_mode.vdisplay;
161 ret = drm_plane_helper_check_state(state, &clip,
162 DRM_PLANE_HELPER_NO_SCALING,
163 DRM_PLANE_HELPER_NO_SCALING,
164 true, true);
165 if (ret)
166 return ret;
167
168 ms->rotmem_size = 0; 229 ms->rotmem_size = 0;
169 if (state->rotation & MALIDP_ROTATED_MASK) { 230 if (state->rotation & MALIDP_ROTATED_MASK) {
170 int val; 231 int val;
@@ -269,6 +330,16 @@ static void malidp_de_plane_update(struct drm_plane *plane,
269 val &= ~LAYER_COMP_MASK; 330 val &= ~LAYER_COMP_MASK;
270 val |= LAYER_COMP_PIXEL; 331 val |= LAYER_COMP_PIXEL;
271 332
333 val &= ~LAYER_FLOWCFG(LAYER_FLOWCFG_MASK);
334 if (plane->state->crtc) {
335 struct malidp_crtc_state *m =
336 to_malidp_crtc_state(plane->state->crtc->state);
337
338 if (m->scaler_config.scale_enable &&
339 m->scaler_config.plane_src_id == mp->layer->id)
340 val |= LAYER_FLOWCFG(LAYER_FLOWCFG_SCALE_SE);
341 }
342
272 /* set the 'enable layer' bit */ 343 /* set the 'enable layer' bit */
273 val |= LAYER_ENABLE; 344 val |= LAYER_ENABLE;
274 345
@@ -281,7 +352,8 @@ static void malidp_de_plane_disable(struct drm_plane *plane,
281{ 352{
282 struct malidp_plane *mp = to_malidp_plane(plane); 353 struct malidp_plane *mp = to_malidp_plane(plane);
283 354
284 malidp_hw_clearbits(mp->hwdev, LAYER_ENABLE, 355 malidp_hw_clearbits(mp->hwdev,
356 LAYER_ENABLE | LAYER_FLOWCFG(LAYER_FLOWCFG_MASK),
285 mp->layer->base + MALIDP_LAYER_CONTROL); 357 mp->layer->base + MALIDP_LAYER_CONTROL);
286} 358}
287 359
diff --git a/drivers/gpu/drm/arm/malidp_regs.h b/drivers/gpu/drm/arm/malidp_regs.h
index b816067a65c5..2039f857f77d 100644
--- a/drivers/gpu/drm/arm/malidp_regs.h
+++ b/drivers/gpu/drm/arm/malidp_regs.h
@@ -63,6 +63,8 @@
63 63
64/* bit masks that are common between products */ 64/* bit masks that are common between products */
65#define MALIDP_CFG_VALID (1 << 0) 65#define MALIDP_CFG_VALID (1 << 0)
66#define MALIDP_DISP_FUNC_GAMMA (1 << 0)
67#define MALIDP_DISP_FUNC_CADJ (1 << 4)
66#define MALIDP_DISP_FUNC_ILACED (1 << 8) 68#define MALIDP_DISP_FUNC_ILACED (1 << 8)
67 69
68/* register offsets for IRQ management */ 70/* register offsets for IRQ management */
@@ -99,6 +101,58 @@
99 101
100#define MALIDP_PRODUCT_ID(__core_id) ((u32)(__core_id) >> 16) 102#define MALIDP_PRODUCT_ID(__core_id) ((u32)(__core_id) >> 16)
101 103
104/* register offsets relative to MALIDP5x0_COEFFS_BASE */
105#define MALIDP_COLOR_ADJ_COEF 0x00000
106#define MALIDP_COEF_TABLE_ADDR 0x00030
107#define MALIDP_COEF_TABLE_DATA 0x00034
108
109/* Scaling engine registers and masks. */
110#define MALIDP_SE_SCALING_EN (1 << 0)
111#define MALIDP_SE_ALPHA_EN (1 << 1)
112#define MALIDP_SE_ENH_MASK 3
113#define MALIDP_SE_ENH(x) (((x) & MALIDP_SE_ENH_MASK) << 2)
114#define MALIDP_SE_RGBO_IF_EN (1 << 4)
115#define MALIDP550_SE_CTL_SEL_MASK 7
116#define MALIDP550_SE_CTL_VCSEL(x) \
117 (((x) & MALIDP550_SE_CTL_SEL_MASK) << 20)
118#define MALIDP550_SE_CTL_HCSEL(x) \
119 (((x) & MALIDP550_SE_CTL_SEL_MASK) << 16)
120
121/* Blocks with offsets from SE_CONTROL register. */
122#define MALIDP_SE_LAYER_CONTROL 0x14
123#define MALIDP_SE_L0_IN_SIZE 0x00
124#define MALIDP_SE_L0_OUT_SIZE 0x04
125#define MALIDP_SE_SET_V_SIZE(x) (((x) & 0x1fff) << 16)
126#define MALIDP_SE_SET_H_SIZE(x) (((x) & 0x1fff) << 0)
127#define MALIDP_SE_SCALING_CONTROL 0x24
128#define MALIDP_SE_H_INIT_PH 0x00
129#define MALIDP_SE_H_DELTA_PH 0x04
130#define MALIDP_SE_V_INIT_PH 0x08
131#define MALIDP_SE_V_DELTA_PH 0x0c
132#define MALIDP_SE_COEFFTAB_ADDR 0x10
133#define MALIDP_SE_COEFFTAB_ADDR_MASK 0x7f
134#define MALIDP_SE_V_COEFFTAB (1 << 8)
135#define MALIDP_SE_H_COEFFTAB (1 << 9)
136#define MALIDP_SE_SET_V_COEFFTAB_ADDR(x) \
137 (MALIDP_SE_V_COEFFTAB | ((x) & MALIDP_SE_COEFFTAB_ADDR_MASK))
138#define MALIDP_SE_SET_H_COEFFTAB_ADDR(x) \
139 (MALIDP_SE_H_COEFFTAB | ((x) & MALIDP_SE_COEFFTAB_ADDR_MASK))
140#define MALIDP_SE_COEFFTAB_DATA 0x14
141#define MALIDP_SE_COEFFTAB_DATA_MASK 0x3fff
142#define MALIDP_SE_SET_COEFFTAB_DATA(x) \
143 ((x) & MALIDP_SE_COEFFTAB_DATA_MASK)
144/* Image enhancement coefficients register offset */
145#define MALIDP_SE_IMAGE_ENH 0x3C
146/* ENH_LIMITS offset 0x0 */
147#define MALIDP_SE_ENH_LOW_LEVEL 24
148#define MALIDP_SE_ENH_HIGH_LEVEL 63
149#define MALIDP_SE_ENH_LIMIT_MASK 0xfff
150#define MALIDP_SE_SET_ENH_LIMIT_LOW(x) \
151 ((x) & MALIDP_SE_ENH_LIMIT_MASK)
152#define MALIDP_SE_SET_ENH_LIMIT_HIGH(x) \
153 (((x) & MALIDP_SE_ENH_LIMIT_MASK) << 16)
154#define MALIDP_SE_ENH_COEFF0 0x04
155
102/* register offsets and bits specific to DP500 */ 156/* register offsets and bits specific to DP500 */
103#define MALIDP500_ADDR_SPACE_SIZE 0x01000 157#define MALIDP500_ADDR_SPACE_SIZE 0x01000
104#define MALIDP500_DC_BASE 0x00000 158#define MALIDP500_DC_BASE 0x00000
@@ -120,6 +174,18 @@
120#define MALIDP500_COLOR_ADJ_COEF 0x00078 174#define MALIDP500_COLOR_ADJ_COEF 0x00078
121#define MALIDP500_COEF_TABLE_ADDR 0x000a8 175#define MALIDP500_COEF_TABLE_ADDR 0x000a8
122#define MALIDP500_COEF_TABLE_DATA 0x000ac 176#define MALIDP500_COEF_TABLE_DATA 0x000ac
177
178/*
179 * The YUV2RGB coefficients on the DP500 are not in the video layer's register
180 * block. They belong in a separate block above the layer's registers, hence
181 * the negative offset.
182 */
183#define MALIDP500_LV_YUV2RGB ((s16)(-0xB8))
184/*
185 * To match DP550/650, the start of the coeffs registers is
186 * at COLORADJ_COEFF0 instead of at YUV_RGB_COEF1.
187 */
188#define MALIDP500_COEFFS_BASE 0x00078
123#define MALIDP500_DE_LV_BASE 0x00100 189#define MALIDP500_DE_LV_BASE 0x00100
124#define MALIDP500_DE_LV_PTR_BASE 0x00124 190#define MALIDP500_DE_LV_PTR_BASE 0x00124
125#define MALIDP500_DE_LG1_BASE 0x00200 191#define MALIDP500_DE_LG1_BASE 0x00200
@@ -127,6 +193,7 @@
127#define MALIDP500_DE_LG2_BASE 0x00300 193#define MALIDP500_DE_LG2_BASE 0x00300
128#define MALIDP500_DE_LG2_PTR_BASE 0x0031c 194#define MALIDP500_DE_LG2_PTR_BASE 0x0031c
129#define MALIDP500_SE_BASE 0x00c00 195#define MALIDP500_SE_BASE 0x00c00
196#define MALIDP500_SE_CONTROL 0x00c0c
130#define MALIDP500_SE_PTR_BASE 0x00e0c 197#define MALIDP500_SE_PTR_BASE 0x00e0c
131#define MALIDP500_DC_IRQ_BASE 0x00f00 198#define MALIDP500_DC_IRQ_BASE 0x00f00
132#define MALIDP500_CONFIG_VALID 0x00f00 199#define MALIDP500_CONFIG_VALID 0x00f00
@@ -145,9 +212,7 @@
145#define MALIDP550_DE_DISP_SIDEBAND 0x00040 212#define MALIDP550_DE_DISP_SIDEBAND 0x00040
146#define MALIDP550_DE_BGND_COLOR 0x00044 213#define MALIDP550_DE_BGND_COLOR 0x00044
147#define MALIDP550_DE_OUTPUT_DEPTH 0x0004c 214#define MALIDP550_DE_OUTPUT_DEPTH 0x0004c
148#define MALIDP550_DE_COLOR_COEF 0x00050 215#define MALIDP550_COEFFS_BASE 0x00050
149#define MALIDP550_DE_COEF_TABLE_ADDR 0x00080
150#define MALIDP550_DE_COEF_TABLE_DATA 0x00084
151#define MALIDP550_DE_LV1_BASE 0x00100 216#define MALIDP550_DE_LV1_BASE 0x00100
152#define MALIDP550_DE_LV1_PTR_BASE 0x00124 217#define MALIDP550_DE_LV1_PTR_BASE 0x00124
153#define MALIDP550_DE_LV2_BASE 0x00200 218#define MALIDP550_DE_LV2_BASE 0x00200
@@ -158,6 +223,7 @@
158#define MALIDP550_DE_LS_PTR_BASE 0x0042c 223#define MALIDP550_DE_LS_PTR_BASE 0x0042c
159#define MALIDP550_DE_PERF_BASE 0x00500 224#define MALIDP550_DE_PERF_BASE 0x00500
160#define MALIDP550_SE_BASE 0x08000 225#define MALIDP550_SE_BASE 0x08000
226#define MALIDP550_SE_CONTROL 0x08010
161#define MALIDP550_DC_BASE 0x0c000 227#define MALIDP550_DC_BASE 0x0c000
162#define MALIDP550_DC_CONTROL 0x0c010 228#define MALIDP550_DC_CONTROL 0x0c010
163#define MALIDP550_DC_CONFIG_REQ (1 << 16) 229#define MALIDP550_DC_CONFIG_REQ (1 << 16)
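
For illustration, the COEFFTAB macros above compose two register words: an address word selecting the vertical or horizontal bank plus a 7-bit entry index, and a 14-bit data word per filter tap. A standalone sketch with a hypothetical tap value (the driver itself would write MALIDP_COEFFTAB_NUM_COEFFS such data words after setting the address):

#include <stdio.h>
#include <stdint.h>

/* Mirrors of the MALIDP_SE_*COEFFTAB* macros above. */
#define COEFFTAB_ADDR_MASK	0x7f
#define V_COEFFTAB		(1u << 8)
#define COEFFTAB_DATA_MASK	0x3fff

int main(void)
{
	uint32_t addr = V_COEFFTAB | (0 & COEFFTAB_ADDR_MASK);	/* vertical bank, entry 0 */
	int16_t tap = -384;					/* hypothetical coefficient */
	uint32_t data = (uint16_t)tap & COEFFTAB_DATA_MASK;

	printf("addr=0x%03x data=0x%04x\n", addr, data);
	return 0;
}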
diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c
index 1597458d884e..d6c2a5d190eb 100644
--- a/drivers/gpu/drm/armada/armada_gem.c
+++ b/drivers/gpu/drm/armada/armada_gem.c
@@ -529,10 +529,10 @@ static const struct dma_buf_ops armada_gem_prime_dmabuf_ops = {
529 .map_dma_buf = armada_gem_prime_map_dma_buf, 529 .map_dma_buf = armada_gem_prime_map_dma_buf,
530 .unmap_dma_buf = armada_gem_prime_unmap_dma_buf, 530 .unmap_dma_buf = armada_gem_prime_unmap_dma_buf,
531 .release = drm_gem_dmabuf_release, 531 .release = drm_gem_dmabuf_release,
532 .kmap_atomic = armada_gem_dmabuf_no_kmap, 532 .map_atomic = armada_gem_dmabuf_no_kmap,
533 .kunmap_atomic = armada_gem_dmabuf_no_kunmap, 533 .unmap_atomic = armada_gem_dmabuf_no_kunmap,
534 .kmap = armada_gem_dmabuf_no_kmap, 534 .map = armada_gem_dmabuf_no_kmap,
535 .kunmap = armada_gem_dmabuf_no_kunmap, 535 .unmap = armada_gem_dmabuf_no_kunmap,
536 .mmap = armada_gem_dmabuf_mmap, 536 .mmap = armada_gem_dmabuf_mmap,
537}; 537};
538 538
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index 47b78e52691c..aaef0a652f10 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -645,7 +645,8 @@ static void ast_crtc_reset(struct drm_crtc *crtc)
645} 645}
646 646
647static int ast_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, 647static int ast_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
648 u16 *blue, uint32_t size) 648 u16 *blue, uint32_t size,
649 struct drm_modeset_acquire_ctx *ctx)
649{ 650{
650 struct ast_crtc *ast_crtc = to_ast_crtc(crtc); 651 struct ast_crtc *ast_crtc = to_ast_crtc(crtc);
651 int i; 652 int i;
diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c
index 50c910efa13d..e879496b8a42 100644
--- a/drivers/gpu/drm/ast/ast_ttm.c
+++ b/drivers/gpu/drm/ast/ast_ttm.c
@@ -236,6 +236,7 @@ struct ttm_bo_driver ast_bo_driver = {
236 .verify_access = ast_bo_verify_access, 236 .verify_access = ast_bo_verify_access,
237 .io_mem_reserve = &ast_ttm_io_mem_reserve, 237 .io_mem_reserve = &ast_ttm_io_mem_reserve,
238 .io_mem_free = &ast_ttm_io_mem_free, 238 .io_mem_free = &ast_ttm_io_mem_free,
239 .io_mem_pfn = ttm_bo_default_io_mem_pfn,
239}; 240};
240 241
241int ast_mm_init(struct ast_private *ast) 242int ast_mm_init(struct ast_private *ast)
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
index e7799b6ee829..65a3bd7a0c00 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
@@ -22,7 +22,7 @@
22#include <linux/of_graph.h> 22#include <linux/of_graph.h>
23 23
24#include <drm/drmP.h> 24#include <drm/drmP.h>
25#include <drm/drm_panel.h> 25#include <drm/drm_of.h>
26 26
27#include "atmel_hlcdc_dc.h" 27#include "atmel_hlcdc_dc.h"
28 28
@@ -152,29 +152,11 @@ static const struct drm_connector_funcs atmel_hlcdc_panel_connector_funcs = {
152 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, 152 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
153}; 153};
154 154
155static int atmel_hlcdc_check_endpoint(struct drm_device *dev,
156 const struct of_endpoint *ep)
157{
158 struct device_node *np;
159 void *obj;
160
161 np = of_graph_get_remote_port_parent(ep->local_node);
162
163 obj = of_drm_find_panel(np);
164 if (!obj)
165 obj = of_drm_find_bridge(np);
166
167 of_node_put(np);
168
169 return obj ? 0 : -EPROBE_DEFER;
170}
171
172static int atmel_hlcdc_attach_endpoint(struct drm_device *dev, 155static int atmel_hlcdc_attach_endpoint(struct drm_device *dev,
173 const struct of_endpoint *ep) 156 const struct device_node *np)
174{ 157{
175 struct atmel_hlcdc_dc *dc = dev->dev_private; 158 struct atmel_hlcdc_dc *dc = dev->dev_private;
176 struct atmel_hlcdc_rgb_output *output; 159 struct atmel_hlcdc_rgb_output *output;
177 struct device_node *np;
178 struct drm_panel *panel; 160 struct drm_panel *panel;
179 struct drm_bridge *bridge; 161 struct drm_bridge *bridge;
180 int ret; 162 int ret;
@@ -195,13 +177,11 @@ static int atmel_hlcdc_attach_endpoint(struct drm_device *dev,
195 177
196 output->encoder.possible_crtcs = 0x1; 178 output->encoder.possible_crtcs = 0x1;
197 179
198 np = of_graph_get_remote_port_parent(ep->local_node); 180 ret = drm_of_find_panel_or_bridge(np, 0, 0, &panel, &bridge);
199 181 if (ret)
200 ret = -EPROBE_DEFER; 182 return ret;
201 183
202 panel = of_drm_find_panel(np);
203 if (panel) { 184 if (panel) {
204 of_node_put(np);
205 output->connector.dpms = DRM_MODE_DPMS_OFF; 185 output->connector.dpms = DRM_MODE_DPMS_OFF;
206 output->connector.polled = DRM_CONNECTOR_POLL_CONNECT; 186 output->connector.polled = DRM_CONNECTOR_POLL_CONNECT;
207 drm_connector_helper_add(&output->connector, 187 drm_connector_helper_add(&output->connector,
@@ -226,9 +206,6 @@ static int atmel_hlcdc_attach_endpoint(struct drm_device *dev,
226 return 0; 206 return 0;
227 } 207 }
228 208
229 bridge = of_drm_find_bridge(np);
230 of_node_put(np);
231
232 if (bridge) { 209 if (bridge) {
233 ret = drm_bridge_attach(&output->encoder, bridge, NULL); 210 ret = drm_bridge_attach(&output->encoder, bridge, NULL);
234 if (!ret) 211 if (!ret)
@@ -243,31 +220,22 @@ err_encoder_cleanup:
243 220
244int atmel_hlcdc_create_outputs(struct drm_device *dev) 221int atmel_hlcdc_create_outputs(struct drm_device *dev)
245{ 222{
246 struct device_node *ep_np = NULL; 223 struct device_node *remote;
247 struct of_endpoint ep; 224 int ret = -ENODEV;
248 int ret; 225 int endpoint = 0;
249 226
250 for_each_endpoint_of_node(dev->dev->of_node, ep_np) { 227 while (true) {
251 ret = of_graph_parse_endpoint(ep_np, &ep); 228 /* Loop through the possible multiple connections to the output */
252 if (!ret) 229 remote = of_graph_get_remote_node(dev->dev->of_node, 0,
253 ret = atmel_hlcdc_check_endpoint(dev, &ep); 230 endpoint++);
254 231 if (!remote)
255 if (ret) { 232 break;
256 of_node_put(ep_np); 233
257 return ret; 234 ret = atmel_hlcdc_attach_endpoint(dev, remote);
258 } 235 of_node_put(remote);
259 } 236 if (ret)
260
261 for_each_endpoint_of_node(dev->dev->of_node, ep_np) {
262 ret = of_graph_parse_endpoint(ep_np, &ep);
263 if (!ret)
264 ret = atmel_hlcdc_attach_endpoint(dev, &ep);
265
266 if (ret) {
267 of_node_put(ep_np);
268 return ret; 237 return ret;
269 }
270 } 238 }
271 239
272 return 0; 240 return ret;
273} 241}
diff --git a/drivers/gpu/drm/bochs/bochs_mm.c b/drivers/gpu/drm/bochs/bochs_mm.c
index 857755ac2d70..c4cadb638460 100644
--- a/drivers/gpu/drm/bochs/bochs_mm.c
+++ b/drivers/gpu/drm/bochs/bochs_mm.c
@@ -205,6 +205,7 @@ struct ttm_bo_driver bochs_bo_driver = {
205 .verify_access = bochs_bo_verify_access, 205 .verify_access = bochs_bo_verify_access,
206 .io_mem_reserve = &bochs_ttm_io_mem_reserve, 206 .io_mem_reserve = &bochs_ttm_io_mem_reserve,
207 .io_mem_free = &bochs_ttm_io_mem_free, 207 .io_mem_free = &bochs_ttm_io_mem_free,
208 .io_mem_pfn = ttm_bo_default_io_mem_pfn,
208}; 209};
209 210
210int bochs_mm_init(struct bochs_device *bochs) 211int bochs_mm_init(struct bochs_device *bochs)
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7533.c b/drivers/gpu/drm/bridge/adv7511/adv7533.c
index 8b210373cfa2..ac804f81e2f6 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7533.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7533.c
@@ -232,7 +232,6 @@ void adv7533_detach_dsi(struct adv7511 *adv)
232int adv7533_parse_dt(struct device_node *np, struct adv7511 *adv) 232int adv7533_parse_dt(struct device_node *np, struct adv7511 *adv)
233{ 233{
234 u32 num_lanes; 234 u32 num_lanes;
235 struct device_node *endpoint;
236 235
237 of_property_read_u32(np, "adi,dsi-lanes", &num_lanes); 236 of_property_read_u32(np, "adi,dsi-lanes", &num_lanes);
238 237
@@ -241,17 +240,10 @@ int adv7533_parse_dt(struct device_node *np, struct adv7511 *adv)
241 240
242 adv->num_dsi_lanes = num_lanes; 241 adv->num_dsi_lanes = num_lanes;
243 242
244 endpoint = of_graph_get_next_endpoint(np, NULL); 243 adv->host_node = of_graph_get_remote_node(np, 0, 0);
245 if (!endpoint) 244 if (!adv->host_node)
246 return -ENODEV; 245 return -ENODEV;
247 246
248 adv->host_node = of_graph_get_remote_port_parent(endpoint);
249 if (!adv->host_node) {
250 of_node_put(endpoint);
251 return -ENODEV;
252 }
253
254 of_node_put(endpoint);
255 of_node_put(adv->host_node); 247 of_node_put(adv->host_node);
256 248
257 adv->use_timing_gen = !of_property_read_bool(np, 249 adv->use_timing_gen = !of_property_read_bool(np,
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
index c26997afd3cf..4c758ed51939 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
@@ -1439,13 +1439,19 @@ void analogix_dp_unbind(struct device *dev, struct device *master,
1439 struct analogix_dp_device *dp = dev_get_drvdata(dev); 1439 struct analogix_dp_device *dp = dev_get_drvdata(dev);
1440 1440
1441 analogix_dp_bridge_disable(dp->bridge); 1441 analogix_dp_bridge_disable(dp->bridge);
1442 dp->connector.funcs->destroy(&dp->connector);
1443 dp->encoder->funcs->destroy(dp->encoder);
1442 1444
1443 if (dp->plat_data->panel) { 1445 if (dp->plat_data->panel) {
1444 if (drm_panel_unprepare(dp->plat_data->panel)) 1446 if (drm_panel_unprepare(dp->plat_data->panel))
1445 DRM_ERROR("failed to turnoff the panel\n"); 1447 DRM_ERROR("failed to turnoff the panel\n");
1448 if (drm_panel_detach(dp->plat_data->panel))
1449 DRM_ERROR("failed to detach the panel\n");
1446 } 1450 }
1447 1451
1452 drm_dp_aux_unregister(&dp->aux);
1448 pm_runtime_disable(dev); 1453 pm_runtime_disable(dev);
1454 clk_disable_unprepare(dp->clock);
1449} 1455}
1450EXPORT_SYMBOL_GPL(analogix_dp_unbind); 1456EXPORT_SYMBOL_GPL(analogix_dp_unbind);
1451 1457
diff --git a/drivers/gpu/drm/bridge/dumb-vga-dac.c b/drivers/gpu/drm/bridge/dumb-vga-dac.c
index 63e113bd21d2..831a606c4706 100644
--- a/drivers/gpu/drm/bridge/dumb-vga-dac.c
+++ b/drivers/gpu/drm/bridge/dumb-vga-dac.c
@@ -154,21 +154,12 @@ static const struct drm_bridge_funcs dumb_vga_bridge_funcs = {
154 154
155static struct i2c_adapter *dumb_vga_retrieve_ddc(struct device *dev) 155static struct i2c_adapter *dumb_vga_retrieve_ddc(struct device *dev)
156{ 156{
157 struct device_node *end_node, *phandle, *remote; 157 struct device_node *phandle, *remote;
158 struct i2c_adapter *ddc; 158 struct i2c_adapter *ddc;
159 159
160 end_node = of_graph_get_endpoint_by_regs(dev->of_node, 1, -1); 160 remote = of_graph_get_remote_node(dev->of_node, 1, -1);
161 if (!end_node) { 161 if (!remote)
162 dev_err(dev, "Missing connector endpoint\n");
163 return ERR_PTR(-ENODEV);
164 }
165
166 remote = of_graph_get_remote_port_parent(end_node);
167 of_node_put(end_node);
168 if (!remote) {
169 dev_err(dev, "Enable to parse remote node\n");
170 return ERR_PTR(-EINVAL); 162 return ERR_PTR(-EINVAL);
171 }
172 163
173 phandle = of_parse_phandle(remote, "ddc-i2c-bus", 0); 164 phandle = of_parse_phandle(remote, "ddc-i2c-bus", 0);
174 of_node_put(remote); 165 of_node_put(remote);
diff --git a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
index cfc606a13a6d..11f11086a68f 100644
--- a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
+++ b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
@@ -279,10 +279,6 @@ static int ge_b850v3_lvds_init(struct device *dev)
279 return -ENOMEM; 279 return -ENOMEM;
280 } 280 }
281 281
282 ge_b850v3_lvds_ptr->bridge.funcs = &ge_b850v3_lvds_funcs;
283 ge_b850v3_lvds_ptr->bridge.of_node = dev->of_node;
284 drm_bridge_add(&ge_b850v3_lvds_ptr->bridge);
285
286success: 282success:
287 mutex_unlock(&ge_b850v3_lvds_dev_mutex); 283 mutex_unlock(&ge_b850v3_lvds_dev_mutex);
288 return 0; 284 return 0;
@@ -317,6 +313,11 @@ static int stdp4028_ge_b850v3_fw_probe(struct i2c_client *stdp4028_i2c,
317 ge_b850v3_lvds_ptr->stdp4028_i2c = stdp4028_i2c; 313 ge_b850v3_lvds_ptr->stdp4028_i2c = stdp4028_i2c;
318 i2c_set_clientdata(stdp4028_i2c, ge_b850v3_lvds_ptr); 314 i2c_set_clientdata(stdp4028_i2c, ge_b850v3_lvds_ptr);
319 315
316 /* drm bridge initialization */
317 ge_b850v3_lvds_ptr->bridge.funcs = &ge_b850v3_lvds_funcs;
318 ge_b850v3_lvds_ptr->bridge.of_node = dev->of_node;
319 drm_bridge_add(&ge_b850v3_lvds_ptr->bridge);
320
320 /* Clear pending interrupts since power up. */ 321 /* Clear pending interrupts since power up. */
321 i2c_smbus_write_word_data(stdp4028_i2c, 322 i2c_smbus_write_word_data(stdp4028_i2c,
322 STDP4028_DPTX_IRQ_STS_REG, 323 STDP4028_DPTX_IRQ_STS_REG,
diff --git a/drivers/gpu/drm/bridge/nxp-ptn3460.c b/drivers/gpu/drm/bridge/nxp-ptn3460.c
index 27f98c518dde..351704390d02 100644
--- a/drivers/gpu/drm/bridge/nxp-ptn3460.c
+++ b/drivers/gpu/drm/bridge/nxp-ptn3460.c
@@ -20,8 +20,8 @@
20#include <linux/module.h> 20#include <linux/module.h>
21#include <linux/of.h> 21#include <linux/of.h>
22#include <linux/of_gpio.h> 22#include <linux/of_gpio.h>
23#include <linux/of_graph.h>
24 23
24#include <drm/drm_of.h>
25#include <drm/drm_panel.h> 25#include <drm/drm_panel.h>
26 26
27#include "drm_crtc.h" 27#include "drm_crtc.h"
@@ -292,7 +292,6 @@ static int ptn3460_probe(struct i2c_client *client,
292{ 292{
293 struct device *dev = &client->dev; 293 struct device *dev = &client->dev;
294 struct ptn3460_bridge *ptn_bridge; 294 struct ptn3460_bridge *ptn_bridge;
295 struct device_node *endpoint, *panel_node;
296 int ret; 295 int ret;
297 296
298 ptn_bridge = devm_kzalloc(dev, sizeof(*ptn_bridge), GFP_KERNEL); 297 ptn_bridge = devm_kzalloc(dev, sizeof(*ptn_bridge), GFP_KERNEL);
@@ -300,16 +299,9 @@ static int ptn3460_probe(struct i2c_client *client,
300 return -ENOMEM; 299 return -ENOMEM;
301 } 300 }
302 301
303 endpoint = of_graph_get_next_endpoint(dev->of_node, NULL); 302 ret = drm_of_find_panel_or_bridge(dev->of_node, 0, 0, &ptn_bridge->panel, NULL);
304 if (endpoint) { 303 if (ret)
305 panel_node = of_graph_get_remote_port_parent(endpoint); 304 return ret;
306 if (panel_node) {
307 ptn_bridge->panel = of_drm_find_panel(panel_node);
308 of_node_put(panel_node);
309 if (!ptn_bridge->panel)
310 return -EPROBE_DEFER;
311 }
312 }
313 305
314 ptn_bridge->client = client; 306 ptn_bridge->client = client;
315 307
diff --git a/drivers/gpu/drm/bridge/parade-ps8622.c b/drivers/gpu/drm/bridge/parade-ps8622.c
index ac8cc5b50d9f..1dcec3b97e67 100644
--- a/drivers/gpu/drm/bridge/parade-ps8622.c
+++ b/drivers/gpu/drm/bridge/parade-ps8622.c
@@ -22,10 +22,10 @@
22#include <linux/module.h> 22#include <linux/module.h>
23#include <linux/of.h> 23#include <linux/of.h>
24#include <linux/of_device.h> 24#include <linux/of_device.h>
25#include <linux/of_graph.h>
26#include <linux/pm.h> 25#include <linux/pm.h>
27#include <linux/regulator/consumer.h> 26#include <linux/regulator/consumer.h>
28 27
28#include <drm/drm_of.h>
29#include <drm/drm_panel.h> 29#include <drm/drm_panel.h>
30 30
31#include "drmP.h" 31#include "drmP.h"
@@ -536,7 +536,6 @@ static int ps8622_probe(struct i2c_client *client,
536 const struct i2c_device_id *id) 536 const struct i2c_device_id *id)
537{ 537{
538 struct device *dev = &client->dev; 538 struct device *dev = &client->dev;
539 struct device_node *endpoint, *panel_node;
540 struct ps8622_bridge *ps8622; 539 struct ps8622_bridge *ps8622;
541 int ret; 540 int ret;
542 541
@@ -544,16 +543,9 @@ static int ps8622_probe(struct i2c_client *client,
544 if (!ps8622) 543 if (!ps8622)
545 return -ENOMEM; 544 return -ENOMEM;
546 545
547 endpoint = of_graph_get_next_endpoint(dev->of_node, NULL); 546 ret = drm_of_find_panel_or_bridge(dev->of_node, 0, 0, &ps8622->panel, NULL);
548 if (endpoint) { 547 if (ret)
549 panel_node = of_graph_get_remote_port_parent(endpoint); 548 return ret;
550 if (panel_node) {
551 ps8622->panel = of_drm_find_panel(panel_node);
552 of_node_put(panel_node);
553 if (!ps8622->panel)
554 return -EPROBE_DEFER;
555 }
556 }
557 549
558 ps8622->client = client; 550 ps8622->client = client;
559 551
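
The ptn3460 and ps8622 hunks above, like the atmel-hlcdc and dumb-vga-dac ones before them, all collapse the same open-coded sequence (of_graph_get_next_endpoint(), of_graph_get_remote_port_parent(), of_drm_find_panel()/of_drm_find_bridge(), plus the of_node_put() bookkeeping) into a single drm_of_find_panel_or_bridge() call. A hedged probe fragment showing the resulting idiom; the surrounding function is hypothetical, only the drm_of_* call comes from the diff:

#include <linux/device.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include <drm/drm_bridge.h>

/* Hypothetical probe fragment illustrating the new lookup idiom. */
static int example_output_probe(struct device *dev)
{
	struct drm_panel *panel = NULL;
	struct drm_bridge *bridge = NULL;
	int ret;

	/* port 0, endpoint 0; defers probing until the remote device binds */
	ret = drm_of_find_panel_or_bridge(dev->of_node, 0, 0, &panel, &bridge);
	if (ret)
		return ret;

	/* on success exactly one of panel/bridge is non-NULL */
	return 0;
}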
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
index 32f02e92e0b9..4e1f54a675d8 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
@@ -30,18 +30,15 @@
30#include <drm/drm_encoder_slave.h> 30#include <drm/drm_encoder_slave.h>
31#include <drm/bridge/dw_hdmi.h> 31#include <drm/bridge/dw_hdmi.h>
32 32
33#include <uapi/linux/media-bus-format.h>
34#include <uapi/linux/videodev2.h>
35
33#include "dw-hdmi.h" 36#include "dw-hdmi.h"
34#include "dw-hdmi-audio.h" 37#include "dw-hdmi-audio.h"
35 38
36#define DDC_SEGMENT_ADDR 0x30 39#define DDC_SEGMENT_ADDR 0x30
37#define HDMI_EDID_LEN 512 40#define HDMI_EDID_LEN 512
38 41
39#define RGB 0
40#define YCBCR444 1
41#define YCBCR422_16BITS 2
42#define YCBCR422_8BITS 3
43#define XVYCC444 4
44
45enum hdmi_datamap { 42enum hdmi_datamap {
46 RGB444_8B = 0x01, 43 RGB444_8B = 0x01,
47 RGB444_10B = 0x03, 44 RGB444_10B = 0x03,
@@ -95,10 +92,10 @@ struct hdmi_vmode {
95}; 92};
96 93
97struct hdmi_data_info { 94struct hdmi_data_info {
98 unsigned int enc_in_format; 95 unsigned int enc_in_bus_format;
99 unsigned int enc_out_format; 96 unsigned int enc_out_bus_format;
100 unsigned int enc_color_depth; 97 unsigned int enc_in_encoding;
101 unsigned int colorimetry; 98 unsigned int enc_out_encoding;
102 unsigned int pix_repet_factor; 99 unsigned int pix_repet_factor;
103 unsigned int hdcp_enable; 100 unsigned int hdcp_enable;
104 struct hdmi_vmode video_mode; 101 struct hdmi_vmode video_mode;
@@ -567,6 +564,78 @@ void dw_hdmi_audio_disable(struct dw_hdmi *hdmi)
567} 564}
568EXPORT_SYMBOL_GPL(dw_hdmi_audio_disable); 565EXPORT_SYMBOL_GPL(dw_hdmi_audio_disable);
569 566
567static bool hdmi_bus_fmt_is_rgb(unsigned int bus_format)
568{
569 switch (bus_format) {
570 case MEDIA_BUS_FMT_RGB888_1X24:
571 case MEDIA_BUS_FMT_RGB101010_1X30:
572 case MEDIA_BUS_FMT_RGB121212_1X36:
573 case MEDIA_BUS_FMT_RGB161616_1X48:
574 return true;
575
576 default:
577 return false;
578 }
579}
580
581static bool hdmi_bus_fmt_is_yuv444(unsigned int bus_format)
582{
583 switch (bus_format) {
584 case MEDIA_BUS_FMT_YUV8_1X24:
585 case MEDIA_BUS_FMT_YUV10_1X30:
586 case MEDIA_BUS_FMT_YUV12_1X36:
587 case MEDIA_BUS_FMT_YUV16_1X48:
588 return true;
589
590 default:
591 return false;
592 }
593}
594
595static bool hdmi_bus_fmt_is_yuv422(unsigned int bus_format)
596{
597 switch (bus_format) {
598 case MEDIA_BUS_FMT_UYVY8_1X16:
599 case MEDIA_BUS_FMT_UYVY10_1X20:
600 case MEDIA_BUS_FMT_UYVY12_1X24:
601 return true;
602
603 default:
604 return false;
605 }
606}
607
608static int hdmi_bus_fmt_color_depth(unsigned int bus_format)
609{
610 switch (bus_format) {
611 case MEDIA_BUS_FMT_RGB888_1X24:
612 case MEDIA_BUS_FMT_YUV8_1X24:
613 case MEDIA_BUS_FMT_UYVY8_1X16:
614 case MEDIA_BUS_FMT_UYYVYY8_0_5X24:
615 return 8;
616
617 case MEDIA_BUS_FMT_RGB101010_1X30:
618 case MEDIA_BUS_FMT_YUV10_1X30:
619 case MEDIA_BUS_FMT_UYVY10_1X20:
620 case MEDIA_BUS_FMT_UYYVYY10_0_5X30:
621 return 10;
622
623 case MEDIA_BUS_FMT_RGB121212_1X36:
624 case MEDIA_BUS_FMT_YUV12_1X36:
625 case MEDIA_BUS_FMT_UYVY12_1X24:
626 case MEDIA_BUS_FMT_UYYVYY12_0_5X36:
627 return 12;
628
629 case MEDIA_BUS_FMT_RGB161616_1X48:
630 case MEDIA_BUS_FMT_YUV16_1X48:
631 case MEDIA_BUS_FMT_UYYVYY16_0_5X48:
632 return 16;
633
634 default:
635 return 0;
636 }
637}
638
570/* 639/*
571 * this submodule is responsible for the video data synchronization. 640 * this submodule is responsible for the video data synchronization.
572 * for example, for RGB 4:4:4 input, the data map is defined as 641 * for example, for RGB 4:4:4 input, the data map is defined as
@@ -579,37 +648,49 @@ static void hdmi_video_sample(struct dw_hdmi *hdmi)
579 int color_format = 0; 648 int color_format = 0;
580 u8 val; 649 u8 val;
581 650
582 if (hdmi->hdmi_data.enc_in_format == RGB) { 651 switch (hdmi->hdmi_data.enc_in_bus_format) {
583 if (hdmi->hdmi_data.enc_color_depth == 8) 652 case MEDIA_BUS_FMT_RGB888_1X24:
584 color_format = 0x01; 653 color_format = 0x01;
585 else if (hdmi->hdmi_data.enc_color_depth == 10) 654 break;
586 color_format = 0x03; 655 case MEDIA_BUS_FMT_RGB101010_1X30:
587 else if (hdmi->hdmi_data.enc_color_depth == 12) 656 color_format = 0x03;
588 color_format = 0x05; 657 break;
589 else if (hdmi->hdmi_data.enc_color_depth == 16) 658 case MEDIA_BUS_FMT_RGB121212_1X36:
590 color_format = 0x07; 659 color_format = 0x05;
591 else 660 break;
592 return; 661 case MEDIA_BUS_FMT_RGB161616_1X48:
593 } else if (hdmi->hdmi_data.enc_in_format == YCBCR444) { 662 color_format = 0x07;
594 if (hdmi->hdmi_data.enc_color_depth == 8) 663 break;
595 color_format = 0x09; 664
596 else if (hdmi->hdmi_data.enc_color_depth == 10) 665 case MEDIA_BUS_FMT_YUV8_1X24:
597 color_format = 0x0B; 666 case MEDIA_BUS_FMT_UYYVYY8_0_5X24:
598 else if (hdmi->hdmi_data.enc_color_depth == 12) 667 color_format = 0x09;
599 color_format = 0x0D; 668 break;
600 else if (hdmi->hdmi_data.enc_color_depth == 16) 669 case MEDIA_BUS_FMT_YUV10_1X30:
601 color_format = 0x0F; 670 case MEDIA_BUS_FMT_UYYVYY10_0_5X30:
602 else 671 color_format = 0x0B;
603 return; 672 break;
604 } else if (hdmi->hdmi_data.enc_in_format == YCBCR422_8BITS) { 673 case MEDIA_BUS_FMT_YUV12_1X36:
605 if (hdmi->hdmi_data.enc_color_depth == 8) 674 case MEDIA_BUS_FMT_UYYVYY12_0_5X36:
606 color_format = 0x16; 675 color_format = 0x0D;
607 else if (hdmi->hdmi_data.enc_color_depth == 10) 676 break;
608 color_format = 0x14; 677 case MEDIA_BUS_FMT_YUV16_1X48:
609 else if (hdmi->hdmi_data.enc_color_depth == 12) 678 case MEDIA_BUS_FMT_UYYVYY16_0_5X48:
610 color_format = 0x12; 679 color_format = 0x0F;
611 else 680 break;
612 return; 681
682 case MEDIA_BUS_FMT_UYVY8_1X16:
683 color_format = 0x16;
684 break;
685 case MEDIA_BUS_FMT_UYVY10_1X20:
686 color_format = 0x14;
687 break;
688 case MEDIA_BUS_FMT_UYVY12_1X24:
689 color_format = 0x12;
690 break;
691
692 default:
693 return;
613 } 694 }
614 695
615 val = HDMI_TX_INVID0_INTERNAL_DE_GENERATOR_DISABLE | 696 val = HDMI_TX_INVID0_INTERNAL_DE_GENERATOR_DISABLE |
@@ -632,26 +713,30 @@ static void hdmi_video_sample(struct dw_hdmi *hdmi)
632 713
633static int is_color_space_conversion(struct dw_hdmi *hdmi) 714static int is_color_space_conversion(struct dw_hdmi *hdmi)
634{ 715{
635 return hdmi->hdmi_data.enc_in_format != hdmi->hdmi_data.enc_out_format; 716 return hdmi->hdmi_data.enc_in_bus_format != hdmi->hdmi_data.enc_out_bus_format;
636} 717}
637 718
638static int is_color_space_decimation(struct dw_hdmi *hdmi) 719static int is_color_space_decimation(struct dw_hdmi *hdmi)
639{ 720{
640 if (hdmi->hdmi_data.enc_out_format != YCBCR422_8BITS) 721 if (!hdmi_bus_fmt_is_yuv422(hdmi->hdmi_data.enc_out_bus_format))
641 return 0; 722 return 0;
642 if (hdmi->hdmi_data.enc_in_format == RGB || 723
643 hdmi->hdmi_data.enc_in_format == YCBCR444) 724 if (hdmi_bus_fmt_is_rgb(hdmi->hdmi_data.enc_in_bus_format) ||
725 hdmi_bus_fmt_is_yuv444(hdmi->hdmi_data.enc_in_bus_format))
644 return 1; 726 return 1;
727
645 return 0; 728 return 0;
646} 729}
647 730
648static int is_color_space_interpolation(struct dw_hdmi *hdmi) 731static int is_color_space_interpolation(struct dw_hdmi *hdmi)
649{ 732{
650 if (hdmi->hdmi_data.enc_in_format != YCBCR422_8BITS) 733 if (!hdmi_bus_fmt_is_yuv422(hdmi->hdmi_data.enc_in_bus_format))
651 return 0; 734 return 0;
652 if (hdmi->hdmi_data.enc_out_format == RGB || 735
653 hdmi->hdmi_data.enc_out_format == YCBCR444) 736 if (hdmi_bus_fmt_is_rgb(hdmi->hdmi_data.enc_out_bus_format) ||
737 hdmi_bus_fmt_is_yuv444(hdmi->hdmi_data.enc_out_bus_format))
654 return 1; 738 return 1;
739
655 return 0; 740 return 0;
656} 741}
657 742
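
Taken together, the three predicates read: a colour-space conversion is any input/output bus-format mismatch, decimation is needed when RGB or YUV 4:4:4 input must leave as 4:2:2, and interpolation when 4:2:2 input must leave as RGB or 4:4:4. A standalone sketch for one pipeline (YUV 4:4:4 in, 4:2:2 out); the format classes here are hard-coded mirrors of the helpers rather than derived from MEDIA_BUS_FMT_* values:

#include <stdio.h>
#include <stdbool.h>

int main(void)
{
	/* Pipeline under test: YUV 4:4:4 input, YUV 4:2:2 output. */
	bool in_rgb = false, in_yuv444 = true, in_yuv422 = false;
	bool out_rgb = false, out_yuv444 = false, out_yuv422 = true;

	bool csc = true;	/* bus formats differ, so conversion is on */
	bool decimation = out_yuv422 && (in_rgb || in_yuv444);
	bool interpolation = in_yuv422 && (out_rgb || out_yuv444);

	/* 4:4:4 -> 4:2:2 engages the chroma decimator only. */
	printf("csc=%d dec=%d int=%d\n", csc, decimation, interpolation);
	return 0;
}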
@@ -662,15 +747,16 @@ static void dw_hdmi_update_csc_coeffs(struct dw_hdmi *hdmi)
662 u32 csc_scale = 1; 747 u32 csc_scale = 1;
663 748
664 if (is_color_space_conversion(hdmi)) { 749 if (is_color_space_conversion(hdmi)) {
665 if (hdmi->hdmi_data.enc_out_format == RGB) { 750 if (hdmi_bus_fmt_is_rgb(hdmi->hdmi_data.enc_out_bus_format)) {
666 if (hdmi->hdmi_data.colorimetry == 751 if (hdmi->hdmi_data.enc_out_encoding ==
667 HDMI_COLORIMETRY_ITU_601) 752 V4L2_YCBCR_ENC_601)
668 csc_coeff = &csc_coeff_rgb_out_eitu601; 753 csc_coeff = &csc_coeff_rgb_out_eitu601;
669 else 754 else
670 csc_coeff = &csc_coeff_rgb_out_eitu709; 755 csc_coeff = &csc_coeff_rgb_out_eitu709;
671 } else if (hdmi->hdmi_data.enc_in_format == RGB) { 756 } else if (hdmi_bus_fmt_is_rgb(
672 if (hdmi->hdmi_data.colorimetry == 757 hdmi->hdmi_data.enc_in_bus_format)) {
673 HDMI_COLORIMETRY_ITU_601) 758 if (hdmi->hdmi_data.enc_out_encoding ==
759 V4L2_YCBCR_ENC_601)
674 csc_coeff = &csc_coeff_rgb_in_eitu601; 760 csc_coeff = &csc_coeff_rgb_in_eitu601;
675 else 761 else
676 csc_coeff = &csc_coeff_rgb_in_eitu709; 762 csc_coeff = &csc_coeff_rgb_in_eitu709;
@@ -708,16 +794,23 @@ static void hdmi_video_csc(struct dw_hdmi *hdmi)
708 else if (is_color_space_decimation(hdmi)) 794 else if (is_color_space_decimation(hdmi))
709 decimation = HDMI_CSC_CFG_DECMODE_CHROMA_INT_FORMULA3; 795 decimation = HDMI_CSC_CFG_DECMODE_CHROMA_INT_FORMULA3;
710 796
711 if (hdmi->hdmi_data.enc_color_depth == 8) 797 switch (hdmi_bus_fmt_color_depth(hdmi->hdmi_data.enc_out_bus_format)) {
798 case 8:
712 color_depth = HDMI_CSC_SCALE_CSC_COLORDE_PTH_24BPP; 799 color_depth = HDMI_CSC_SCALE_CSC_COLORDE_PTH_24BPP;
713 else if (hdmi->hdmi_data.enc_color_depth == 10) 800 break;
801 case 10:
714 color_depth = HDMI_CSC_SCALE_CSC_COLORDE_PTH_30BPP; 802 color_depth = HDMI_CSC_SCALE_CSC_COLORDE_PTH_30BPP;
715 else if (hdmi->hdmi_data.enc_color_depth == 12) 803 break;
804 case 12:
716 color_depth = HDMI_CSC_SCALE_CSC_COLORDE_PTH_36BPP; 805 color_depth = HDMI_CSC_SCALE_CSC_COLORDE_PTH_36BPP;
717 else if (hdmi->hdmi_data.enc_color_depth == 16) 806 break;
807 case 16:
718 color_depth = HDMI_CSC_SCALE_CSC_COLORDE_PTH_48BPP; 808 color_depth = HDMI_CSC_SCALE_CSC_COLORDE_PTH_48BPP;
719 else 809 break;
810
811 default:
720 return; 812 return;
813 }
721 814
722 /* Configure the CSC registers */ 815 /* Configure the CSC registers */
723 hdmi_writeb(hdmi, interpolation | decimation, HDMI_CSC_CFG); 816 hdmi_writeb(hdmi, interpolation | decimation, HDMI_CSC_CFG);
@@ -740,32 +833,43 @@ static void hdmi_video_packetize(struct dw_hdmi *hdmi)
740 struct hdmi_data_info *hdmi_data = &hdmi->hdmi_data; 833 struct hdmi_data_info *hdmi_data = &hdmi->hdmi_data;
741 u8 val, vp_conf; 834 u8 val, vp_conf;
742 835
743 if (hdmi_data->enc_out_format == RGB || 836 if (hdmi_bus_fmt_is_rgb(hdmi->hdmi_data.enc_out_bus_format) ||
744 hdmi_data->enc_out_format == YCBCR444) { 837 hdmi_bus_fmt_is_yuv444(hdmi->hdmi_data.enc_out_bus_format)) {
745 if (!hdmi_data->enc_color_depth) { 838 switch (hdmi_bus_fmt_color_depth(
746 output_select = HDMI_VP_CONF_OUTPUT_SELECTOR_BYPASS; 839 hdmi->hdmi_data.enc_out_bus_format)) {
747 } else if (hdmi_data->enc_color_depth == 8) { 840 case 8:
748 color_depth = 4; 841 color_depth = 4;
749 output_select = HDMI_VP_CONF_OUTPUT_SELECTOR_BYPASS; 842 output_select = HDMI_VP_CONF_OUTPUT_SELECTOR_BYPASS;
750 } else if (hdmi_data->enc_color_depth == 10) { 843 break;
844 case 10:
751 color_depth = 5; 845 color_depth = 5;
752 } else if (hdmi_data->enc_color_depth == 12) { 846 break;
847 case 12:
753 color_depth = 6; 848 color_depth = 6;
754 } else if (hdmi_data->enc_color_depth == 16) { 849 break;
850 case 16:
755 color_depth = 7; 851 color_depth = 7;
756 } else { 852 break;
757 return; 853 default:
854 output_select = HDMI_VP_CONF_OUTPUT_SELECTOR_BYPASS;
758 } 855 }
759 } else if (hdmi_data->enc_out_format == YCBCR422_8BITS) { 856 } else if (hdmi_bus_fmt_is_yuv422(hdmi->hdmi_data.enc_out_bus_format)) {
760 if (!hdmi_data->enc_color_depth || 857 switch (hdmi_bus_fmt_color_depth(
761 hdmi_data->enc_color_depth == 8) 858 hdmi->hdmi_data.enc_out_bus_format)) {
859 case 0:
860 case 8:
762 remap_size = HDMI_VP_REMAP_YCC422_16bit; 861 remap_size = HDMI_VP_REMAP_YCC422_16bit;
763 else if (hdmi_data->enc_color_depth == 10) 862 break;
863 case 10:
764 remap_size = HDMI_VP_REMAP_YCC422_20bit; 864 remap_size = HDMI_VP_REMAP_YCC422_20bit;
765 else if (hdmi_data->enc_color_depth == 12) 865 break;
866 case 12:
766 remap_size = HDMI_VP_REMAP_YCC422_24bit; 867 remap_size = HDMI_VP_REMAP_YCC422_24bit;
767 else 868 break;
869
870 default:
768 return; 871 return;
872 }
769 output_select = HDMI_VP_CONF_OUTPUT_SELECTOR_YCC422; 873 output_select = HDMI_VP_CONF_OUTPUT_SELECTOR_YCC422;
770 } else { 874 } else {
771 return; 875 return;
@@ -1111,10 +1215,46 @@ static enum drm_connector_status dw_hdmi_phy_read_hpd(struct dw_hdmi *hdmi,
1111 connector_status_connected : connector_status_disconnected; 1215 connector_status_connected : connector_status_disconnected;
1112} 1216}
1113 1217
1218static void dw_hdmi_phy_update_hpd(struct dw_hdmi *hdmi, void *data,
1219 bool force, bool disabled, bool rxsense)
1220{
1221 u8 old_mask = hdmi->phy_mask;
1222
1223 if (force || disabled || !rxsense)
1224 hdmi->phy_mask |= HDMI_PHY_RX_SENSE;
1225 else
1226 hdmi->phy_mask &= ~HDMI_PHY_RX_SENSE;
1227
1228 if (old_mask != hdmi->phy_mask)
1229 hdmi_writeb(hdmi, hdmi->phy_mask, HDMI_PHY_MASK0);
1230}
1231
1232static void dw_hdmi_phy_setup_hpd(struct dw_hdmi *hdmi, void *data)
1233{
1234 /*
1235 * Configure the PHY RX SENSE and HPD interrupt polarities and clear
1236 * any pending interrupts.
1237 */
1238 hdmi_writeb(hdmi, HDMI_PHY_HPD | HDMI_PHY_RX_SENSE, HDMI_PHY_POL0);
1239 hdmi_writeb(hdmi, HDMI_IH_PHY_STAT0_HPD | HDMI_IH_PHY_STAT0_RX_SENSE,
1240 HDMI_IH_PHY_STAT0);
1241
1242 /* Enable cable hot plug irq. */
1243 hdmi_writeb(hdmi, hdmi->phy_mask, HDMI_PHY_MASK0);
1244
1245 /* Clear and unmute interrupts. */
1246 hdmi_writeb(hdmi, HDMI_IH_PHY_STAT0_HPD | HDMI_IH_PHY_STAT0_RX_SENSE,
1247 HDMI_IH_PHY_STAT0);
1248 hdmi_writeb(hdmi, ~(HDMI_IH_PHY_STAT0_HPD | HDMI_IH_PHY_STAT0_RX_SENSE),
1249 HDMI_IH_MUTE_PHY_STAT0);
1250}
1251
1114static const struct dw_hdmi_phy_ops dw_hdmi_synopsys_phy_ops = { 1252static const struct dw_hdmi_phy_ops dw_hdmi_synopsys_phy_ops = {
1115 .init = dw_hdmi_phy_init, 1253 .init = dw_hdmi_phy_init,
1116 .disable = dw_hdmi_phy_disable, 1254 .disable = dw_hdmi_phy_disable,
1117 .read_hpd = dw_hdmi_phy_read_hpd, 1255 .read_hpd = dw_hdmi_phy_read_hpd,
1256 .update_hpd = dw_hdmi_phy_update_hpd,
1257 .setup_hpd = dw_hdmi_phy_setup_hpd,
1118}; 1258};
1119 1259
1120/* ----------------------------------------------------------------------------- 1260/* -----------------------------------------------------------------------------
@@ -1148,28 +1288,36 @@ static void hdmi_config_AVI(struct dw_hdmi *hdmi, struct drm_display_mode *mode)
1148 /* Initialise info frame from DRM mode */ 1288 /* Initialise info frame from DRM mode */
1149 drm_hdmi_avi_infoframe_from_display_mode(&frame, mode); 1289 drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
1150 1290
1151 if (hdmi->hdmi_data.enc_out_format == YCBCR444) 1291 if (hdmi_bus_fmt_is_yuv444(hdmi->hdmi_data.enc_out_bus_format))
1152 frame.colorspace = HDMI_COLORSPACE_YUV444; 1292 frame.colorspace = HDMI_COLORSPACE_YUV444;
1153 else if (hdmi->hdmi_data.enc_out_format == YCBCR422_8BITS) 1293 else if (hdmi_bus_fmt_is_yuv422(hdmi->hdmi_data.enc_out_bus_format))
1154 frame.colorspace = HDMI_COLORSPACE_YUV422; 1294 frame.colorspace = HDMI_COLORSPACE_YUV422;
1155 else 1295 else
1156 frame.colorspace = HDMI_COLORSPACE_RGB; 1296 frame.colorspace = HDMI_COLORSPACE_RGB;
1157 1297
1158 /* Set up colorimetry */ 1298 /* Set up colorimetry */
1159 if (hdmi->hdmi_data.enc_out_format == XVYCC444) { 1299 switch (hdmi->hdmi_data.enc_out_encoding) {
1160 frame.colorimetry = HDMI_COLORIMETRY_EXTENDED; 1300 case V4L2_YCBCR_ENC_601:
1161 if (hdmi->hdmi_data.colorimetry == HDMI_COLORIMETRY_ITU_601) 1301 if (hdmi->hdmi_data.enc_in_encoding == V4L2_YCBCR_ENC_XV601)
1162 frame.extended_colorimetry = 1302 frame.colorimetry = HDMI_COLORIMETRY_EXTENDED;
1303 else
1304 frame.colorimetry = HDMI_COLORIMETRY_ITU_601;
1305 frame.extended_colorimetry =
1163 HDMI_EXTENDED_COLORIMETRY_XV_YCC_601; 1306 HDMI_EXTENDED_COLORIMETRY_XV_YCC_601;
1164 else /*hdmi->hdmi_data.colorimetry == HDMI_COLORIMETRY_ITU_709*/ 1307 break;
1165 frame.extended_colorimetry = 1308 case V4L2_YCBCR_ENC_709:
1309 if (hdmi->hdmi_data.enc_in_encoding == V4L2_YCBCR_ENC_XV709)
1310 frame.colorimetry = HDMI_COLORIMETRY_EXTENDED;
1311 else
1312 frame.colorimetry = HDMI_COLORIMETRY_ITU_709;
1313 frame.extended_colorimetry =
1166 HDMI_EXTENDED_COLORIMETRY_XV_YCC_709; 1314 HDMI_EXTENDED_COLORIMETRY_XV_YCC_709;
1167 } else if (hdmi->hdmi_data.enc_out_format != RGB) { 1315 break;
1168 frame.colorimetry = hdmi->hdmi_data.colorimetry; 1316 default: /* Carries no data */
1169 frame.extended_colorimetry = HDMI_EXTENDED_COLORIMETRY_XV_YCC_601; 1317 frame.colorimetry = HDMI_COLORIMETRY_ITU_601;
1170 } else { /* Carries no data */ 1318 frame.extended_colorimetry =
1171 frame.colorimetry = HDMI_COLORIMETRY_NONE; 1319 HDMI_EXTENDED_COLORIMETRY_XV_YCC_601;
1172 frame.extended_colorimetry = HDMI_EXTENDED_COLORIMETRY_XV_YCC_601; 1320 break;
1173 } 1321 }
1174 1322
1175 frame.scan_mode = HDMI_SCAN_MODE_NONE; 1323 frame.scan_mode = HDMI_SCAN_MODE_NONE;
@@ -1498,19 +1646,30 @@ static int dw_hdmi_setup(struct dw_hdmi *hdmi, struct drm_display_mode *mode)
1498 (hdmi->vic == 21) || (hdmi->vic == 22) || 1646 (hdmi->vic == 21) || (hdmi->vic == 22) ||
1499 (hdmi->vic == 2) || (hdmi->vic == 3) || 1647 (hdmi->vic == 2) || (hdmi->vic == 3) ||
1500 (hdmi->vic == 17) || (hdmi->vic == 18)) 1648 (hdmi->vic == 17) || (hdmi->vic == 18))
1501 hdmi->hdmi_data.colorimetry = HDMI_COLORIMETRY_ITU_601; 1649 hdmi->hdmi_data.enc_out_encoding = V4L2_YCBCR_ENC_601;
1502 else 1650 else
1503 hdmi->hdmi_data.colorimetry = HDMI_COLORIMETRY_ITU_709; 1651 hdmi->hdmi_data.enc_out_encoding = V4L2_YCBCR_ENC_709;
1504 1652
1505 hdmi->hdmi_data.video_mode.mpixelrepetitionoutput = 0; 1653 hdmi->hdmi_data.video_mode.mpixelrepetitionoutput = 0;
1506 hdmi->hdmi_data.video_mode.mpixelrepetitioninput = 0; 1654 hdmi->hdmi_data.video_mode.mpixelrepetitioninput = 0;
1507 1655
1508 /* TODO: Get input format from IPU (via FB driver interface) */ 1656 /* TOFIX: Get input format from plat data or fall back to RGB888 */
1509 hdmi->hdmi_data.enc_in_format = RGB; 1657 if (hdmi->plat_data->input_bus_format)
1658 hdmi->hdmi_data.enc_in_bus_format =
1659 hdmi->plat_data->input_bus_format;
1660 else
1661 hdmi->hdmi_data.enc_in_bus_format = MEDIA_BUS_FMT_RGB888_1X24;
1662
1663 /* TOFIX: Get input encoding from plat data or fall back to none */
1664 if (hdmi->plat_data->input_bus_encoding)
1665 hdmi->hdmi_data.enc_in_encoding =
1666 hdmi->plat_data->input_bus_encoding;
1667 else
1668 hdmi->hdmi_data.enc_in_encoding = V4L2_YCBCR_ENC_DEFAULT;
1510 1669
1511 hdmi->hdmi_data.enc_out_format = RGB; 1670 /* TOFIX: Default to RGB888 output format */
1671 hdmi->hdmi_data.enc_out_bus_format = MEDIA_BUS_FMT_RGB888_1X24;
1512 1672
1513 hdmi->hdmi_data.enc_color_depth = 8;
1514 hdmi->hdmi_data.pix_repet_factor = 0; 1673 hdmi->hdmi_data.pix_repet_factor = 0;
1515 hdmi->hdmi_data.hdcp_enable = 0; 1674 hdmi->hdmi_data.hdcp_enable = 0;
1516 hdmi->hdmi_data.video_mode.mdataenablepolarity = true; 1675 hdmi->hdmi_data.video_mode.mdataenablepolarity = true;
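
The two TOFIX branches above let SoC glue choose the input bus format and encoding through dw_hdmi_plat_data instead of the old hard-coded RGB/8-bit assumption. A hedged sketch of the platform side; the field names match the plat_data accesses in the diff, while the driver name and the chosen formats are purely illustrative:

#include <linux/media-bus-format.h>
#include <linux/videodev2.h>
#include <drm/bridge/dw_hdmi.h>

/* Hypothetical SoC glue: VPU feeds the HDMI core YUV 4:4:4, BT.709. */
static const struct dw_hdmi_plat_data example_hdmi_drv_data = {
	.input_bus_format   = MEDIA_BUS_FMT_YUV8_1X24,
	.input_bus_encoding = V4L2_YCBCR_ENC_709,
	/* .mode_valid and the other platform specifics elided */
};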
@@ -1558,8 +1717,7 @@ static int dw_hdmi_setup(struct dw_hdmi *hdmi, struct drm_display_mode *mode)
1558 return 0; 1717 return 0;
1559} 1718}
1560 1719
1561/* Wait until we are registered to enable interrupts */ 1720static void dw_hdmi_setup_i2c(struct dw_hdmi *hdmi)
1562static int dw_hdmi_fb_registered(struct dw_hdmi *hdmi)
1563{ 1721{
1564 hdmi_writeb(hdmi, HDMI_PHY_I2CM_INT_ADDR_DONE_POL, 1722 hdmi_writeb(hdmi, HDMI_PHY_I2CM_INT_ADDR_DONE_POL,
1565 HDMI_PHY_I2CM_INT_ADDR); 1723 HDMI_PHY_I2CM_INT_ADDR);
@@ -1567,15 +1725,6 @@ static int dw_hdmi_fb_registered(struct dw_hdmi *hdmi)
1567 hdmi_writeb(hdmi, HDMI_PHY_I2CM_CTLINT_ADDR_NAC_POL | 1725 hdmi_writeb(hdmi, HDMI_PHY_I2CM_CTLINT_ADDR_NAC_POL |
1568 HDMI_PHY_I2CM_CTLINT_ADDR_ARBITRATION_POL, 1726 HDMI_PHY_I2CM_CTLINT_ADDR_ARBITRATION_POL,
1569 HDMI_PHY_I2CM_CTLINT_ADDR); 1727 HDMI_PHY_I2CM_CTLINT_ADDR);
1570
1571 /* enable cable hot plug irq */
1572 hdmi_writeb(hdmi, hdmi->phy_mask, HDMI_PHY_MASK0);
1573
1574 /* Clear Hotplug interrupts */
1575 hdmi_writeb(hdmi, HDMI_IH_PHY_STAT0_HPD | HDMI_IH_PHY_STAT0_RX_SENSE,
1576 HDMI_IH_PHY_STAT0);
1577
1578 return 0;
1579} 1728}
1580 1729
1581static void initialize_hdmi_ih_mutes(struct dw_hdmi *hdmi) 1730static void initialize_hdmi_ih_mutes(struct dw_hdmi *hdmi)
@@ -1682,15 +1831,10 @@ static void dw_hdmi_update_power(struct dw_hdmi *hdmi)
1682 */ 1831 */
1683static void dw_hdmi_update_phy_mask(struct dw_hdmi *hdmi) 1832static void dw_hdmi_update_phy_mask(struct dw_hdmi *hdmi)
1684{ 1833{
1685 u8 old_mask = hdmi->phy_mask; 1834 if (hdmi->phy.ops->update_hpd)
1686 1835 hdmi->phy.ops->update_hpd(hdmi, hdmi->phy.data,
1687 if (hdmi->force || hdmi->disabled || !hdmi->rxsense) 1836 hdmi->force, hdmi->disabled,
1688 hdmi->phy_mask |= HDMI_PHY_RX_SENSE; 1837 hdmi->rxsense);
1689 else
1690 hdmi->phy_mask &= ~HDMI_PHY_RX_SENSE;
1691
1692 if (old_mask != hdmi->phy_mask)
1693 hdmi_writeb(hdmi, hdmi->phy_mask, HDMI_PHY_MASK0);
1694} 1838}
1695 1839
1696static enum drm_connector_status 1840static enum drm_connector_status
@@ -1803,6 +1947,20 @@ static int dw_hdmi_bridge_attach(struct drm_bridge *bridge)
1803 return 0; 1947 return 0;
1804} 1948}
1805 1949
1950static bool dw_hdmi_bridge_mode_fixup(struct drm_bridge *bridge,
1951 const struct drm_display_mode *orig_mode,
1952 struct drm_display_mode *mode)
1953{
1954 struct dw_hdmi *hdmi = bridge->driver_private;
1955 struct drm_connector *connector = &hdmi->connector;
1956 enum drm_mode_status status;
1957
1958 status = dw_hdmi_connector_mode_valid(connector, mode);
1959 if (status != MODE_OK)
1960 return false;
1961 return true;
1962}
1963
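The new mode_fixup callback simply re-runs dw_hdmi_connector_mode_valid(), so a mode the connector would reject can no longer slip through the bridge path. Combined with the platform-data hooks, this lets glue drivers veto modes their encoder cannot drive; a hedged sketch of such a filter (callback name and signature assumed, not taken from this patch):

	/* Hypothetical per-platform filter: reject pixel clocks the SoC
	 * cannot generate. mode->clock is in kHz. */
	static enum drm_mode_status
	foo_hdmi_mode_valid(struct drm_connector *connector,
			    const struct drm_display_mode *mode)
	{
		if (mode->clock > 148500)
			return MODE_CLOCK_HIGH;
		return MODE_OK;
	}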
1806static void dw_hdmi_bridge_mode_set(struct drm_bridge *bridge, 1964static void dw_hdmi_bridge_mode_set(struct drm_bridge *bridge,
1807 struct drm_display_mode *orig_mode, 1965 struct drm_display_mode *orig_mode,
1808 struct drm_display_mode *mode) 1966 struct drm_display_mode *mode)
@@ -1844,6 +2002,7 @@ static const struct drm_bridge_funcs dw_hdmi_bridge_funcs = {
1844 .enable = dw_hdmi_bridge_enable, 2002 .enable = dw_hdmi_bridge_enable,
1845 .disable = dw_hdmi_bridge_disable, 2003 .disable = dw_hdmi_bridge_disable,
1846 .mode_set = dw_hdmi_bridge_mode_set, 2004 .mode_set = dw_hdmi_bridge_mode_set,
2005 .mode_fixup = dw_hdmi_bridge_mode_fixup,
1847}; 2006};
1848 2007
1849static irqreturn_t dw_hdmi_i2c_irq(struct dw_hdmi *hdmi) 2008static irqreturn_t dw_hdmi_i2c_irq(struct dw_hdmi *hdmi)
@@ -1882,6 +2041,41 @@ static irqreturn_t dw_hdmi_hardirq(int irq, void *dev_id)
1882 return ret; 2041 return ret;
1883} 2042}
1884 2043
2044void __dw_hdmi_setup_rx_sense(struct dw_hdmi *hdmi, bool hpd, bool rx_sense)
2045{
2046 mutex_lock(&hdmi->mutex);
2047
2048 if (!hdmi->force) {
2049 /*
2050 * If the RX sense status indicates we're disconnected,
2051 * clear the software rxsense status.
2052 */
2053 if (!rx_sense)
2054 hdmi->rxsense = false;
2055
2056 /*
2057 * Only set the software rxsense status when both
 2058 * rxsense and hpd indicate we're connected.
2059 * This avoids what seems to be bad behaviour in
2060 * at least iMX6S versions of the phy.
2061 */
2062 if (hpd)
2063 hdmi->rxsense = true;
2064
2065 dw_hdmi_update_power(hdmi);
2066 dw_hdmi_update_phy_mask(hdmi);
2067 }
2068 mutex_unlock(&hdmi->mutex);
2069}
2070
2071void dw_hdmi_setup_rx_sense(struct device *dev, bool hpd, bool rx_sense)
2072{
2073 struct dw_hdmi *hdmi = dev_get_drvdata(dev);
2074
2075 __dw_hdmi_setup_rx_sense(hdmi, hpd, rx_sense);
2076}
2077EXPORT_SYMBOL_GPL(dw_hdmi_setup_rx_sense);
2078
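dw_hdmi_setup_rx_sense() exports the HPD/RX-sense bookkeeping that used to live only in the IRQ handler, so glue drivers whose plug detection happens outside the DWC core (in a SoC-level status register, say) can feed state back in. A sketch of a platform interrupt handler using it; every foo_* name and register below is assumed:

	/* Hypothetical SoC glue: an out-of-band HPD interrupt forwards the
	 * current plug and rx-sense state into the shared dw-hdmi core. */
	static irqreturn_t foo_hdmi_hpd_irq(int irq, void *dev_id)
	{
		struct foo_hdmi *priv = dev_id;
		u32 stat = readl(priv->regs + FOO_HPD_STAT);

		dw_hdmi_setup_rx_sense(priv->dw_dev,
				       stat & FOO_HPD_CONNECTED,
				       stat & FOO_RX_SENSE);
		return IRQ_HANDLED;
	}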
1885static irqreturn_t dw_hdmi_irq(int irq, void *dev_id) 2079static irqreturn_t dw_hdmi_irq(int irq, void *dev_id)
1886{ 2080{
1887 struct dw_hdmi *hdmi = dev_id; 2081 struct dw_hdmi *hdmi = dev_id;
@@ -1914,30 +2108,10 @@ static irqreturn_t dw_hdmi_irq(int irq, void *dev_id)
1914 * ask the source to re-read the EDID. 2108 * ask the source to re-read the EDID.
1915 */ 2109 */
1916 if (intr_stat & 2110 if (intr_stat &
1917 (HDMI_IH_PHY_STAT0_RX_SENSE | HDMI_IH_PHY_STAT0_HPD)) { 2111 (HDMI_IH_PHY_STAT0_RX_SENSE | HDMI_IH_PHY_STAT0_HPD))
1918 mutex_lock(&hdmi->mutex); 2112 __dw_hdmi_setup_rx_sense(hdmi,
1919 if (!hdmi->force) { 2113 phy_stat & HDMI_PHY_HPD,
1920 /* 2114 phy_stat & HDMI_PHY_RX_SENSE);
1921 * If the RX sense status indicates we're disconnected,
1922 * clear the software rxsense status.
1923 */
1924 if (!(phy_stat & HDMI_PHY_RX_SENSE))
1925 hdmi->rxsense = false;
1926
1927 /*
1928 * Only set the software rxsense status when both
1929 * rxsense and hpd indicates we're connected.
1930 * This avoids what seems to be bad behaviour in
1931 * at least iMX6S versions of the phy.
1932 */
1933 if (phy_stat & HDMI_PHY_HPD)
1934 hdmi->rxsense = true;
1935
1936 dw_hdmi_update_power(hdmi);
1937 dw_hdmi_update_phy_mask(hdmi);
1938 }
1939 mutex_unlock(&hdmi->mutex);
1940 }
1941 2115
1942 if (intr_stat & HDMI_IH_PHY_STAT0_HPD) { 2116 if (intr_stat & HDMI_IH_PHY_STAT0_HPD) {
1943 dev_dbg(hdmi->dev, "EVENT=%s\n", 2117 dev_dbg(hdmi->dev, "EVENT=%s\n",
@@ -2204,29 +2378,15 @@ __dw_hdmi_probe(struct platform_device *pdev,
2204 hdmi->ddc = NULL; 2378 hdmi->ddc = NULL;
2205 } 2379 }
2206 2380
2207 /*
2208 * Configure registers related to HDMI interrupt
2209 * generation before registering IRQ.
2210 */
2211 hdmi_writeb(hdmi, HDMI_PHY_HPD | HDMI_PHY_RX_SENSE, HDMI_PHY_POL0);
2212
2213 /* Clear Hotplug interrupts */
2214 hdmi_writeb(hdmi, HDMI_IH_PHY_STAT0_HPD | HDMI_IH_PHY_STAT0_RX_SENSE,
2215 HDMI_IH_PHY_STAT0);
2216
2217 hdmi->bridge.driver_private = hdmi; 2381 hdmi->bridge.driver_private = hdmi;
2218 hdmi->bridge.funcs = &dw_hdmi_bridge_funcs; 2382 hdmi->bridge.funcs = &dw_hdmi_bridge_funcs;
2219#ifdef CONFIG_OF 2383#ifdef CONFIG_OF
2220 hdmi->bridge.of_node = pdev->dev.of_node; 2384 hdmi->bridge.of_node = pdev->dev.of_node;
2221#endif 2385#endif
2222 2386
2223 ret = dw_hdmi_fb_registered(hdmi); 2387 dw_hdmi_setup_i2c(hdmi);
2224 if (ret) 2388 if (hdmi->phy.ops->setup_hpd)
2225 goto err_iahb; 2389 hdmi->phy.ops->setup_hpd(hdmi, hdmi->phy.data);
2226
2227 /* Unmute interrupts */
2228 hdmi_writeb(hdmi, ~(HDMI_IH_PHY_STAT0_HPD | HDMI_IH_PHY_STAT0_RX_SENSE),
2229 HDMI_IH_MUTE_PHY_STAT0);
2230 2390
2231 memset(&pdevinfo, 0, sizeof(pdevinfo)); 2391 memset(&pdevinfo, 0, sizeof(pdevinfo));
2232 pdevinfo.parent = dev; 2392 pdevinfo.parent = dev;
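With this hunk the probe path stops poking HPD registers directly: the I2C interrupt polarity setup moves into dw_hdmi_setup_i2c(), and hot-plug programming is delegated to phy.ops->setup_hpd, mirroring the update_hpd indirection in dw_hdmi_update_phy_mask() above. Judging purely from the call sites in this patch, the relevant ops look roughly like the sketch below (layout assumed; the stock Synopsys PHY presumably keeps the removed register writes as its default implementation):

	/* Shape inferred from the call sites; field order assumed. */
	struct dw_hdmi_phy_ops_sketch {
		void (*setup_hpd)(struct dw_hdmi *hdmi, void *data);
		void (*update_hpd)(struct dw_hdmi *hdmi, void *data,
				   bool force, bool disabled, bool rxsense);
	};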
diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
index de9ffb49e9f6..5c26488e7a2d 100644
--- a/drivers/gpu/drm/bridge/tc358767.c
+++ b/drivers/gpu/drm/bridge/tc358767.c
@@ -1244,7 +1244,6 @@ static const struct regmap_config tc_regmap_config = {
1244static int tc_probe(struct i2c_client *client, const struct i2c_device_id *id) 1244static int tc_probe(struct i2c_client *client, const struct i2c_device_id *id)
1245{ 1245{
1246 struct device *dev = &client->dev; 1246 struct device *dev = &client->dev;
1247 struct device_node *ep;
1248 struct tc_data *tc; 1247 struct tc_data *tc;
1249 int ret; 1248 int ret;
1250 1249
@@ -1255,29 +1254,9 @@ static int tc_probe(struct i2c_client *client, const struct i2c_device_id *id)
1255 tc->dev = dev; 1254 tc->dev = dev;
1256 1255
1257 /* port@2 is the output port */ 1256 /* port@2 is the output port */
1258 ep = of_graph_get_endpoint_by_regs(dev->of_node, 2, -1); 1257 ret = drm_of_find_panel_or_bridge(dev->of_node, 2, 0, &tc->panel, NULL);
1259 if (ep) { 1258 if (ret)
1260 struct device_node *remote; 1259 return ret;
1261
1262 remote = of_graph_get_remote_port_parent(ep);
1263 if (!remote) {
1264 dev_warn(dev, "endpoint %s not connected\n",
1265 ep->full_name);
1266 of_node_put(ep);
1267 return -ENODEV;
1268 }
1269 of_node_put(ep);
1270 tc->panel = of_drm_find_panel(remote);
1271 if (tc->panel) {
1272 dev_dbg(dev, "found panel %s\n", remote->full_name);
1273 } else {
1274 dev_dbg(dev, "waiting for panel %s\n",
1275 remote->full_name);
1276 of_node_put(remote);
1277 return -EPROBE_DEFER;
1278 }
1279 of_node_put(remote);
1280 }
1281 1260
1282 /* Shut down GPIO is optional */ 1261 /* Shut down GPIO is optional */
1283 tc->sd_gpio = devm_gpiod_get_optional(dev, "shutdown", GPIOD_OUT_HIGH); 1262 tc->sd_gpio = devm_gpiod_get_optional(dev, "shutdown", GPIOD_OUT_HIGH);
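Twenty-odd lines of open-coded OF-graph walking collapse into a single drm_of_find_panel_or_bridge() call. The helper returns 0 on success, -ENODEV when the port is connected to nothing usable, and -EPROBE_DEFER while the panel driver has not bound yet, so the probe-deferral dance the old code spelled out comes for free:

	struct drm_panel *panel;
	int ret;

	/* Look at port 2, endpoint 0; we want a panel, not a bridge. */
	ret = drm_of_find_panel_or_bridge(dev->of_node, 2, 0, &panel, NULL);
	if (ret)
		return ret;	/* includes -EPROBE_DEFER */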
diff --git a/drivers/gpu/drm/bridge/ti-tfp410.c b/drivers/gpu/drm/bridge/ti-tfp410.c
index 7d519b46aee4..eee4efda829e 100644
--- a/drivers/gpu/drm/bridge/ti-tfp410.c
+++ b/drivers/gpu/drm/bridge/ti-tfp410.c
@@ -165,18 +165,13 @@ static irqreturn_t tfp410_hpd_irq_thread(int irq, void *arg)
165 165
166static int tfp410_get_connector_properties(struct tfp410 *dvi) 166static int tfp410_get_connector_properties(struct tfp410 *dvi)
167{ 167{
168 struct device_node *ep = NULL, *connector_node = NULL; 168 struct device_node *connector_node, *ddc_phandle;
169 struct device_node *ddc_phandle = NULL;
170 int ret = 0; 169 int ret = 0;
171 170
172 /* port@1 is the connector node */ 171 /* port@1 is the connector node */
173 ep = of_graph_get_endpoint_by_regs(dvi->dev->of_node, 1, -1); 172 connector_node = of_graph_get_remote_node(dvi->dev->of_node, 1, -1);
174 if (!ep)
175 goto fail;
176
177 connector_node = of_graph_get_remote_port_parent(ep);
178 if (!connector_node) 173 if (!connector_node)
179 goto fail; 174 return -ENODEV;
180 175
181 dvi->hpd = fwnode_get_named_gpiod(&connector_node->fwnode, 176 dvi->hpd = fwnode_get_named_gpiod(&connector_node->fwnode,
182 "hpd-gpios", 0, GPIOD_IN, "hpd"); 177 "hpd-gpios", 0, GPIOD_IN, "hpd");
@@ -199,10 +194,10 @@ static int tfp410_get_connector_properties(struct tfp410 *dvi)
199 else 194 else
200 ret = -EPROBE_DEFER; 195 ret = -EPROBE_DEFER;
201 196
197 of_node_put(ddc_phandle);
198
202fail: 199fail:
203 of_node_put(ep);
204 of_node_put(connector_node); 200 of_node_put(connector_node);
205 of_node_put(ddc_phandle);
206 return ret; 201 return ret;
207} 202}
208 203
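Beyond switching to of_graph_get_remote_node(), the rework tightens the reference handling: ddc_phandle is now put immediately after its last use, on the one path where it is known valid, and the shared fail label only puts connector_node, which is always acquired first. The general shape, with hypothetical helpers standing in:

	/* Sketch only; get_ref_a()/get_ref_b()/use() are stand-ins. */
	extern struct device_node *get_ref_a(void);
	extern struct device_node *get_ref_b(struct device_node *a);
	extern int use(struct device_node *b);

	static int get_props(void)
	{
		struct device_node *a, *b;
		int ret;

		a = get_ref_a();
		if (!a)
			return -ENODEV;

		b = get_ref_b(a);
		if (!b) {
			ret = -EPROBE_DEFER;
			goto fail;	/* b never acquired: nothing to put */
		}

		ret = use(b);
		of_node_put(b);		/* put exactly where validity is known */
	fail:
		of_node_put(a);
		return ret;
	}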
diff --git a/drivers/gpu/drm/cirrus/cirrus_mode.c b/drivers/gpu/drm/cirrus/cirrus_mode.c
index ed43ab10ac99..53f6f0f84206 100644
--- a/drivers/gpu/drm/cirrus/cirrus_mode.c
+++ b/drivers/gpu/drm/cirrus/cirrus_mode.c
@@ -327,7 +327,8 @@ static void cirrus_crtc_commit(struct drm_crtc *crtc)
327 * but it's a requirement that we provide the function 327 * but it's a requirement that we provide the function
328 */ 328 */
329static int cirrus_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, 329static int cirrus_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
330 u16 *blue, uint32_t size) 330 u16 *blue, uint32_t size,
331 struct drm_modeset_acquire_ctx *ctx)
331{ 332{
332 struct cirrus_crtc *cirrus_crtc = to_cirrus_crtc(crtc); 333 struct cirrus_crtc *cirrus_crtc = to_cirrus_crtc(crtc);
333 int i; 334 int i;
diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c
index f53aa8f4a143..93dbcd38355d 100644
--- a/drivers/gpu/drm/cirrus/cirrus_ttm.c
+++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c
@@ -236,6 +236,7 @@ struct ttm_bo_driver cirrus_bo_driver = {
236 .verify_access = cirrus_bo_verify_access, 236 .verify_access = cirrus_bo_verify_access,
237 .io_mem_reserve = &cirrus_ttm_io_mem_reserve, 237 .io_mem_reserve = &cirrus_ttm_io_mem_reserve,
238 .io_mem_free = &cirrus_ttm_io_mem_free, 238 .io_mem_free = &cirrus_ttm_io_mem_free,
239 .io_mem_pfn = ttm_bo_default_io_mem_pfn,
239}; 240};
240 241
241int cirrus_mm_init(struct cirrus_device *cirrus) 242int cirrus_mm_init(struct cirrus_device *cirrus)
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index 9b892af7811a..f32506a7c1d6 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -1516,19 +1516,9 @@ EXPORT_SYMBOL(drm_atomic_add_affected_planes);
1516void drm_atomic_legacy_backoff(struct drm_atomic_state *state) 1516void drm_atomic_legacy_backoff(struct drm_atomic_state *state)
1517{ 1517{
1518 struct drm_device *dev = state->dev; 1518 struct drm_device *dev = state->dev;
1519 unsigned crtc_mask = 0;
1520 struct drm_crtc *crtc;
1521 int ret; 1519 int ret;
1522 bool global = false; 1520 bool global = false;
1523 1521
1524 drm_for_each_crtc(crtc, dev) {
1525 if (crtc->acquire_ctx != state->acquire_ctx)
1526 continue;
1527
1528 crtc_mask |= drm_crtc_mask(crtc);
1529 crtc->acquire_ctx = NULL;
1530 }
1531
1532 if (WARN_ON(dev->mode_config.acquire_ctx == state->acquire_ctx)) { 1522 if (WARN_ON(dev->mode_config.acquire_ctx == state->acquire_ctx)) {
1533 global = true; 1523 global = true;
1534 1524
@@ -1542,10 +1532,6 @@ retry:
1542 if (ret) 1532 if (ret)
1543 goto retry; 1533 goto retry;
1544 1534
1545 drm_for_each_crtc(crtc, dev)
1546 if (drm_crtc_mask(crtc) & crtc_mask)
1547 crtc->acquire_ctx = state->acquire_ctx;
1548
1549 if (global) 1535 if (global)
1550 dev->mode_config.acquire_ctx = state->acquire_ctx; 1536 dev->mode_config.acquire_ctx = state->acquire_ctx;
1551} 1537}
@@ -1690,22 +1676,8 @@ static void drm_atomic_print_state(const struct drm_atomic_state *state)
1690 drm_atomic_connector_print_state(&p, connector_state); 1676 drm_atomic_connector_print_state(&p, connector_state);
1691} 1677}
1692 1678
1693/** 1679static void __drm_state_dump(struct drm_device *dev, struct drm_printer *p,
1694 * drm_state_dump - dump entire device atomic state 1680 bool take_locks)
1695 * @dev: the drm device
1696 * @p: where to print the state to
1697 *
1698 * Just for debugging. Drivers might want an option to dump state
1699 * to dmesg in case of error irq's. (Hint, you probably want to
1700 * ratelimit this!)
1701 *
1702 * The caller must drm_modeset_lock_all(), or if this is called
1703 * from error irq handler, it should not be enabled by default.
1704 * (Ie. if you are debugging errors you might not care that this
1705 * is racey. But calling this without all modeset locks held is
1706 * not inherently safe.)
1707 */
1708void drm_state_dump(struct drm_device *dev, struct drm_printer *p)
1709{ 1681{
1710 struct drm_mode_config *config = &dev->mode_config; 1682 struct drm_mode_config *config = &dev->mode_config;
1711 struct drm_plane *plane; 1683 struct drm_plane *plane;
@@ -1716,17 +1688,51 @@ void drm_state_dump(struct drm_device *dev, struct drm_printer *p)
1716 if (!drm_core_check_feature(dev, DRIVER_ATOMIC)) 1688 if (!drm_core_check_feature(dev, DRIVER_ATOMIC))
1717 return; 1689 return;
1718 1690
1719 list_for_each_entry(plane, &config->plane_list, head) 1691 list_for_each_entry(plane, &config->plane_list, head) {
1692 if (take_locks)
1693 drm_modeset_lock(&plane->mutex, NULL);
1720 drm_atomic_plane_print_state(p, plane->state); 1694 drm_atomic_plane_print_state(p, plane->state);
1695 if (take_locks)
1696 drm_modeset_unlock(&plane->mutex);
1697 }
1721 1698
1722 list_for_each_entry(crtc, &config->crtc_list, head) 1699 list_for_each_entry(crtc, &config->crtc_list, head) {
1700 if (take_locks)
1701 drm_modeset_lock(&crtc->mutex, NULL);
1723 drm_atomic_crtc_print_state(p, crtc->state); 1702 drm_atomic_crtc_print_state(p, crtc->state);
1703 if (take_locks)
1704 drm_modeset_unlock(&crtc->mutex);
1705 }
1724 1706
1725 drm_connector_list_iter_begin(dev, &conn_iter); 1707 drm_connector_list_iter_begin(dev, &conn_iter);
1708 if (take_locks)
1709 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
1726 drm_for_each_connector_iter(connector, &conn_iter) 1710 drm_for_each_connector_iter(connector, &conn_iter)
1727 drm_atomic_connector_print_state(p, connector->state); 1711 drm_atomic_connector_print_state(p, connector->state);
1712 if (take_locks)
1713 drm_modeset_unlock(&dev->mode_config.connection_mutex);
1728 drm_connector_list_iter_end(&conn_iter); 1714 drm_connector_list_iter_end(&conn_iter);
1729} 1715}
1716
1717/**
1718 * drm_state_dump - dump entire device atomic state
1719 * @dev: the drm device
1720 * @p: where to print the state to
1721 *
1722 * Just for debugging. Drivers might want an option to dump state
1723 * to dmesg in case of error irq's. (Hint, you probably want to
1724 * ratelimit this!)
1725 *
1726 * The caller must drm_modeset_lock_all(), or if this is called
1727 * from error irq handler, it should not be enabled by default.
1728 * (Ie. if you are debugging errors you might not care that this
1729 * is racey. But calling this without all modeset locks held is
1730 * not inherently safe.)
1731 */
1732void drm_state_dump(struct drm_device *dev, struct drm_printer *p)
1733{
1734 __drm_state_dump(dev, p, false);
1735}
1730EXPORT_SYMBOL(drm_state_dump); 1736EXPORT_SYMBOL(drm_state_dump);
1731 1737
1732#ifdef CONFIG_DEBUG_FS 1738#ifdef CONFIG_DEBUG_FS
@@ -1736,9 +1742,7 @@ static int drm_state_info(struct seq_file *m, void *data)
1736 struct drm_device *dev = node->minor->dev; 1742 struct drm_device *dev = node->minor->dev;
1737 struct drm_printer p = drm_seq_file_printer(m); 1743 struct drm_printer p = drm_seq_file_printer(m);
1738 1744
1739 drm_modeset_lock_all(dev); 1745 __drm_state_dump(dev, &p, true);
1740 drm_state_dump(dev, &p);
1741 drm_modeset_unlock_all(dev);
1742 1746
1743 return 0; 1747 return 0;
1744} 1748}
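Rather than have debugfs wrap drm_state_dump() in drm_modeset_lock_all(), the body moves into __drm_state_dump() behind a take_locks flag: the exported entry point stays lock-free for error-IRQ debugging, while the debugfs path gets fine-grained per-plane/per-crtc locking. The pattern, boiled down (all names hypothetical):

	struct ctx;
	extern void lock(struct ctx *c), unlock(struct ctx *c);
	extern void emit(struct ctx *c);

	/* One worker, two entry points: callers choose the locking policy. */
	static void __dump(struct ctx *c, bool take_locks)
	{
		if (take_locks)
			lock(c);
		emit(c);		/* the actual work, written once */
		if (take_locks)
			unlock(c);
	}

	void dump_unlocked(struct ctx *c)	/* caller holds the locks */
	{
		__dump(c, false);
	}

	void dump_locked(struct ctx *c)		/* convenience wrapper */
	{
		__dump(c, true);
	}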
@@ -2077,94 +2081,6 @@ static void complete_crtc_signaling(struct drm_device *dev,
2077 kfree(fence_state); 2081 kfree(fence_state);
2078} 2082}
2079 2083
2080int drm_atomic_remove_fb(struct drm_framebuffer *fb)
2081{
2082 struct drm_modeset_acquire_ctx ctx;
2083 struct drm_device *dev = fb->dev;
2084 struct drm_atomic_state *state;
2085 struct drm_plane *plane;
2086 struct drm_connector *conn;
2087 struct drm_connector_state *conn_state;
2088 int i, ret = 0;
2089 unsigned plane_mask;
2090
2091 state = drm_atomic_state_alloc(dev);
2092 if (!state)
2093 return -ENOMEM;
2094
2095 drm_modeset_acquire_init(&ctx, 0);
2096 state->acquire_ctx = &ctx;
2097
2098retry:
2099 plane_mask = 0;
2100 ret = drm_modeset_lock_all_ctx(dev, &ctx);
2101 if (ret)
2102 goto unlock;
2103
2104 drm_for_each_plane(plane, dev) {
2105 struct drm_plane_state *plane_state;
2106
2107 if (plane->state->fb != fb)
2108 continue;
2109
2110 plane_state = drm_atomic_get_plane_state(state, plane);
2111 if (IS_ERR(plane_state)) {
2112 ret = PTR_ERR(plane_state);
2113 goto unlock;
2114 }
2115
2116 if (plane_state->crtc->primary == plane) {
2117 struct drm_crtc_state *crtc_state;
2118
2119 crtc_state = drm_atomic_get_existing_crtc_state(state, plane_state->crtc);
2120
2121 ret = drm_atomic_add_affected_connectors(state, plane_state->crtc);
2122 if (ret)
2123 goto unlock;
2124
2125 crtc_state->active = false;
2126 ret = drm_atomic_set_mode_for_crtc(crtc_state, NULL);
2127 if (ret)
2128 goto unlock;
2129 }
2130
2131 drm_atomic_set_fb_for_plane(plane_state, NULL);
2132 ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
2133 if (ret)
2134 goto unlock;
2135
2136 plane_mask |= BIT(drm_plane_index(plane));
2137
2138 plane->old_fb = plane->fb;
2139 }
2140
2141 for_each_connector_in_state(state, conn, conn_state, i) {
2142 ret = drm_atomic_set_crtc_for_connector(conn_state, NULL);
2143
2144 if (ret)
2145 goto unlock;
2146 }
2147
2148 if (plane_mask)
2149 ret = drm_atomic_commit(state);
2150
2151unlock:
2152 if (plane_mask)
2153 drm_atomic_clean_old_fb(dev, plane_mask, ret);
2154
2155 if (ret == -EDEADLK) {
2156 drm_modeset_backoff(&ctx);
2157 goto retry;
2158 }
2159
2160 drm_atomic_state_put(state);
2161
2162 drm_modeset_drop_locks(&ctx);
2163 drm_modeset_acquire_fini(&ctx);
2164
2165 return ret;
2166}
2167
2168int drm_mode_atomic_ioctl(struct drm_device *dev, 2084int drm_mode_atomic_ioctl(struct drm_device *dev,
2169 void *data, struct drm_file *file_priv) 2085 void *data, struct drm_file *file_priv)
2170{ 2086{
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index c3994b4d5f32..8be9719284b0 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -459,10 +459,20 @@ mode_fixup(struct drm_atomic_state *state)
459 * 459 *
460 * Check the state object to see if the requested state is physically possible. 460 * Check the state object to see if the requested state is physically possible.
461 * This does all the crtc and connector related computations for an atomic 461 * This does all the crtc and connector related computations for an atomic
462 * update and adds any additional connectors needed for full modesets and calls 462 * update and adds any additional connectors needed for full modesets. It calls
 463 * down into &drm_crtc_helper_funcs.mode_fixup and 463 * the various per-object callbacks in the following order:
464 * &drm_encoder_helper_funcs.mode_fixup or 464 *
465 * &drm_encoder_helper_funcs.atomic_check functions of the driver backend. 465 * 1. &drm_connector_helper_funcs.atomic_best_encoder for determining the new encoder.
466 * 2. &drm_connector_helper_funcs.atomic_check to validate the connector state.
 467 * 3. If it's determined a modeset is needed then all connectors on the affected
 468 * crtc are added and &drm_connector_helper_funcs.atomic_check is run on them.
469 * 4. &drm_bridge_funcs.mode_fixup is called on all encoder bridges.
470 * 5. &drm_encoder_helper_funcs.atomic_check is called to validate any encoder state.
 471 * This function is only called when the encoder will be part of a configured crtc;
 472 * it must not be used for implementing connector property validation.
 473 * If this function is NULL, &drm_encoder_helper_funcs.mode_fixup is called
474 * instead.
475 * 6. &drm_crtc_helper_funcs.mode_fixup is called last, to fix up the mode with crtc constraints.
466 * 476 *
467 * &drm_crtc_state.mode_changed is set when the input mode is changed. 477 * &drm_crtc_state.mode_changed is set when the input mode is changed.
468 * &drm_crtc_state.connectors_changed is set when a connector is added or 478 * &drm_crtc_state.connectors_changed is set when a connector is added or
@@ -492,8 +502,12 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
492 struct drm_connector *connector; 502 struct drm_connector *connector;
493 struct drm_connector_state *old_connector_state, *new_connector_state; 503 struct drm_connector_state *old_connector_state, *new_connector_state;
494 int i, ret; 504 int i, ret;
505 unsigned connectors_mask = 0;
495 506
496 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 507 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
508 bool has_connectors =
509 !!new_crtc_state->connector_mask;
510
497 if (!drm_mode_equal(&old_crtc_state->mode, &new_crtc_state->mode)) { 511 if (!drm_mode_equal(&old_crtc_state->mode, &new_crtc_state->mode)) {
498 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] mode changed\n", 512 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] mode changed\n",
499 crtc->base.id, crtc->name); 513 crtc->base.id, crtc->name);
@@ -515,13 +529,28 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
515 new_crtc_state->mode_changed = true; 529 new_crtc_state->mode_changed = true;
516 new_crtc_state->connectors_changed = true; 530 new_crtc_state->connectors_changed = true;
517 } 531 }
532
533 if (old_crtc_state->active != new_crtc_state->active) {
534 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] active changed\n",
535 crtc->base.id, crtc->name);
536 new_crtc_state->active_changed = true;
537 }
538
539 if (new_crtc_state->enable != has_connectors) {
540 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] enabled/connectors mismatch\n",
541 crtc->base.id, crtc->name);
542
543 return -EINVAL;
544 }
518 } 545 }
519 546
520 ret = handle_conflicting_encoders(state, state->legacy_set_config); 547 ret = handle_conflicting_encoders(state, false);
521 if (ret) 548 if (ret)
522 return ret; 549 return ret;
523 550
524 for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i) { 551 for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i) {
552 const struct drm_connector_helper_funcs *funcs = connector->helper_private;
553
525 /* 554 /*
526 * This only sets crtc->connectors_changed for routing changes, 555 * This only sets crtc->connectors_changed for routing changes,
527 * drivers must set crtc->connectors_changed themselves when 556 * drivers must set crtc->connectors_changed themselves when
@@ -539,6 +568,13 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
539 new_connector_state->link_status) 568 new_connector_state->link_status)
540 new_crtc_state->connectors_changed = true; 569 new_crtc_state->connectors_changed = true;
541 } 570 }
571
572 if (funcs->atomic_check)
573 ret = funcs->atomic_check(connector, new_connector_state);
574 if (ret)
575 return ret;
576
577 connectors_mask += BIT(i);
542 } 578 }
543 579
544 /* 580 /*
@@ -548,20 +584,6 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
548 * crtc only changed its mode but has the same set of connectors. 584 * crtc only changed its mode but has the same set of connectors.
549 */ 585 */
550 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 586 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
551 bool has_connectors =
552 !!new_crtc_state->connector_mask;
553
554 /*
555 * We must set ->active_changed after walking connectors for
556 * otherwise an update that only changes active would result in
557 * a full modeset because update_connector_routing force that.
558 */
559 if (old_crtc_state->active != new_crtc_state->active) {
560 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] active changed\n",
561 crtc->base.id, crtc->name);
562 new_crtc_state->active_changed = true;
563 }
564
565 if (!drm_atomic_crtc_needs_modeset(new_crtc_state)) 587 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
566 continue; 588 continue;
567 589
@@ -577,13 +599,22 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
577 ret = drm_atomic_add_affected_planes(state, crtc); 599 ret = drm_atomic_add_affected_planes(state, crtc);
578 if (ret != 0) 600 if (ret != 0)
579 return ret; 601 return ret;
602 }
580 603
581 if (new_crtc_state->enable != has_connectors) { 604 /*
582 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] enabled/connectors mismatch\n", 605 * Iterate over all connectors again, to make sure atomic_check()
583 crtc->base.id, crtc->name); 606 * has been called on them when a modeset is forced.
607 */
608 for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i) {
609 const struct drm_connector_helper_funcs *funcs = connector->helper_private;
584 610
585 return -EINVAL; 611 if (connectors_mask & BIT(i))
586 } 612 continue;
613
614 if (funcs->atomic_check)
615 ret = funcs->atomic_check(connector, new_connector_state);
616 if (ret)
617 return ret;
587 } 618 }
588 619
589 return mode_fixup(state); 620 return mode_fixup(state);
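The connector-level &drm_connector_helper_funcs.atomic_check hook is new with this patch; connectors_mask records which connectors the first loop already validated so that the second pass, which runs after forced modesets may have pulled additional connectors into the state, never calls the hook twice. (The hunk accumulates with `+=`, which is equivalent to the more conventional `|=` here, since each index i occurs exactly once.) In outline, with stand-in names:

	unsigned long seen = 0;
	int i, ret;

	for (i = 0; i < n_connectors; i++) {	/* first pass */
		ret = connector_check(i);
		if (ret)
			return ret;
		seen |= 1UL << i;		/* BIT(i) in kernel terms */
	}

	add_connectors_for_forced_modesets();	/* may grow the state */

	for (i = 0; i < n_connectors; i++) {	/* second pass: newcomers only */
		if (seen & (1UL << i))
			continue;
		ret = connector_check(i);
		if (ret)
			return ret;
	}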
@@ -2289,12 +2320,15 @@ int drm_atomic_helper_set_config(struct drm_mode_set *set,
2289 if (!state) 2320 if (!state)
2290 return -ENOMEM; 2321 return -ENOMEM;
2291 2322
2292 state->legacy_set_config = true;
2293 state->acquire_ctx = ctx; 2323 state->acquire_ctx = ctx;
2294 ret = __drm_atomic_helper_set_config(set, state); 2324 ret = __drm_atomic_helper_set_config(set, state);
2295 if (ret != 0) 2325 if (ret != 0)
2296 goto fail; 2326 goto fail;
2297 2327
2328 ret = handle_conflicting_encoders(state, true);
2329 if (ret)
2330 return ret;
2331
2298 ret = drm_atomic_commit(state); 2332 ret = drm_atomic_commit(state);
2299 2333
2300fail: 2334fail:
@@ -2622,14 +2656,22 @@ EXPORT_SYMBOL(drm_atomic_helper_commit_duplicated_state);
2622int drm_atomic_helper_resume(struct drm_device *dev, 2656int drm_atomic_helper_resume(struct drm_device *dev,
2623 struct drm_atomic_state *state) 2657 struct drm_atomic_state *state)
2624{ 2658{
2625 struct drm_mode_config *config = &dev->mode_config; 2659 struct drm_modeset_acquire_ctx ctx;
2626 int err; 2660 int err;
2627 2661
2628 drm_mode_config_reset(dev); 2662 drm_mode_config_reset(dev);
2629 2663
2630 drm_modeset_lock_all(dev); 2664 drm_modeset_acquire_init(&ctx, 0);
2631 err = drm_atomic_helper_commit_duplicated_state(state, config->acquire_ctx); 2665 while (1) {
2632 drm_modeset_unlock_all(dev); 2666 err = drm_atomic_helper_commit_duplicated_state(state, &ctx);
2667 if (err != -EDEADLK)
2668 break;
2669
2670 drm_modeset_backoff(&ctx);
2671 }
2672
2673 drm_modeset_drop_locks(&ctx);
2674 drm_modeset_acquire_fini(&ctx);
2633 2675
2634 return err; 2676 return err;
2635} 2677}
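drm_atomic_helper_resume() now owns a private acquire context instead of borrowing the global one set up by drm_modeset_lock_all(), retrying on -EDEADLK via drm_modeset_backoff(). This is the canonical wait/wound-mutex loop that this series threads through the legacy entry points; distilled, with do_locked_work() as a hypothetical stand-in:

	struct drm_modeset_acquire_ctx ctx;
	int ret;

	drm_modeset_acquire_init(&ctx, 0);
retry:
	ret = do_locked_work(&ctx);	/* takes modeset locks via ctx */
	if (ret == -EDEADLK) {
		/* Drop all held locks, wait out the winner, try again. */
		drm_modeset_backoff(&ctx);
		goto retry;
	}
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);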
@@ -2975,7 +3017,7 @@ int drm_atomic_helper_connector_dpms(struct drm_connector *connector,
2975 if (!state) 3017 if (!state)
2976 return -ENOMEM; 3018 return -ENOMEM;
2977 3019
2978 state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc); 3020 state->acquire_ctx = crtc->dev->mode_config.acquire_ctx;
2979retry: 3021retry:
2980 crtc_state = drm_atomic_get_crtc_state(state, crtc); 3022 crtc_state = drm_atomic_get_crtc_state(state, crtc);
2981 if (IS_ERR(crtc_state)) { 3023 if (IS_ERR(crtc_state)) {
@@ -3471,6 +3513,7 @@ EXPORT_SYMBOL(drm_atomic_helper_connector_destroy_state);
3471 * @green: green correction table 3513 * @green: green correction table
3472 * @blue: green correction table 3514 * @blue: green correction table
3473 * @size: size of the tables 3515 * @size: size of the tables
3516 * @ctx: lock acquire context
3474 * 3517 *
3475 * Implements support for legacy gamma correction table for drivers 3518 * Implements support for legacy gamma correction table for drivers
3476 * that support color management through the DEGAMMA_LUT/GAMMA_LUT 3519 * that support color management through the DEGAMMA_LUT/GAMMA_LUT
@@ -3478,7 +3521,8 @@ EXPORT_SYMBOL(drm_atomic_helper_connector_destroy_state);
3478 */ 3521 */
3479int drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc, 3522int drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
3480 u16 *red, u16 *green, u16 *blue, 3523 u16 *red, u16 *green, u16 *blue,
3481 uint32_t size) 3524 uint32_t size,
3525 struct drm_modeset_acquire_ctx *ctx)
3482{ 3526{
3483 struct drm_device *dev = crtc->dev; 3527 struct drm_device *dev = crtc->dev;
3484 struct drm_mode_config *config = &dev->mode_config; 3528 struct drm_mode_config *config = &dev->mode_config;
@@ -3509,8 +3553,7 @@ int drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
3509 blob_data[i].blue = blue[i]; 3553 blob_data[i].blue = blue[i];
3510 } 3554 }
3511 3555
3512 state->acquire_ctx = crtc->dev->mode_config.acquire_ctx; 3556 state->acquire_ctx = ctx;
3513retry:
3514 crtc_state = drm_atomic_get_crtc_state(state, crtc); 3557 crtc_state = drm_atomic_get_crtc_state(state, crtc);
3515 if (IS_ERR(crtc_state)) { 3558 if (IS_ERR(crtc_state)) {
3516 ret = PTR_ERR(crtc_state); 3559 ret = PTR_ERR(crtc_state);
@@ -3534,18 +3577,10 @@ retry:
3534 goto fail; 3577 goto fail;
3535 3578
3536 ret = drm_atomic_commit(state); 3579 ret = drm_atomic_commit(state);
3537fail:
3538 if (ret == -EDEADLK)
3539 goto backoff;
3540 3580
3581fail:
3541 drm_atomic_state_put(state); 3582 drm_atomic_state_put(state);
3542 drm_property_blob_put(blob); 3583 drm_property_blob_put(blob);
3543 return ret; 3584 return ret;
3544
3545backoff:
3546 drm_atomic_state_clear(state);
3547 drm_atomic_legacy_backoff(state);
3548
3549 goto retry;
3550} 3585}
3551EXPORT_SYMBOL(drm_atomic_helper_legacy_gamma_set); 3586EXPORT_SYMBOL(drm_atomic_helper_legacy_gamma_set);
diff --git a/drivers/gpu/drm/drm_color_mgmt.c b/drivers/gpu/drm/drm_color_mgmt.c
index cc23b9a505c0..533f3a3e6877 100644
--- a/drivers/gpu/drm/drm_color_mgmt.c
+++ b/drivers/gpu/drm/drm_color_mgmt.c
@@ -218,28 +218,28 @@ int drm_mode_gamma_set_ioctl(struct drm_device *dev,
218 struct drm_crtc *crtc; 218 struct drm_crtc *crtc;
219 void *r_base, *g_base, *b_base; 219 void *r_base, *g_base, *b_base;
220 int size; 220 int size;
221 struct drm_modeset_acquire_ctx ctx;
221 int ret = 0; 222 int ret = 0;
222 223
223 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 224 if (!drm_core_check_feature(dev, DRIVER_MODESET))
224 return -EINVAL; 225 return -EINVAL;
225 226
226 drm_modeset_lock_all(dev);
227 crtc = drm_crtc_find(dev, crtc_lut->crtc_id); 227 crtc = drm_crtc_find(dev, crtc_lut->crtc_id);
228 if (!crtc) { 228 if (!crtc)
229 ret = -ENOENT; 229 return -ENOENT;
230 goto out;
231 }
232 230
233 if (crtc->funcs->gamma_set == NULL) { 231 if (crtc->funcs->gamma_set == NULL)
234 ret = -ENOSYS; 232 return -ENOSYS;
235 goto out;
236 }
237 233
238 /* memcpy into gamma store */ 234 /* memcpy into gamma store */
239 if (crtc_lut->gamma_size != crtc->gamma_size) { 235 if (crtc_lut->gamma_size != crtc->gamma_size)
240 ret = -EINVAL; 236 return -EINVAL;
237
238 drm_modeset_acquire_init(&ctx, 0);
239retry:
240 ret = drm_modeset_lock_all_ctx(dev, &ctx);
241 if (ret)
241 goto out; 242 goto out;
242 }
243 243
244 size = crtc_lut->gamma_size * (sizeof(uint16_t)); 244 size = crtc_lut->gamma_size * (sizeof(uint16_t));
245 r_base = crtc->gamma_store; 245 r_base = crtc->gamma_store;
@@ -260,10 +260,17 @@ int drm_mode_gamma_set_ioctl(struct drm_device *dev,
260 goto out; 260 goto out;
261 } 261 }
262 262
263 ret = crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, crtc->gamma_size); 263 ret = crtc->funcs->gamma_set(crtc, r_base, g_base, b_base,
264 crtc->gamma_size, &ctx);
264 265
265out: 266out:
266 drm_modeset_unlock_all(dev); 267 if (ret == -EDEADLK) {
268 drm_modeset_backoff(&ctx);
269 goto retry;
270 }
271 drm_modeset_drop_locks(&ctx);
272 drm_modeset_acquire_fini(&ctx);
273
267 return ret; 274 return ret;
268 275
269} 276}
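With the ioctl now owning the acquire context, &drm_crtc_funcs.gamma_set grows a ctx parameter, and drm_atomic_helper_legacy_gamma_set() (previous file) sheds its private retry loop: an -EDEADLK simply bubbles back to the ioctl's backoff-and-retry above. Atomic drivers keep the usual one-line wiring, now against the new prototype (sketch):

	static const struct drm_crtc_funcs foo_crtc_funcs = {
		/* ... other hooks elided ... */
		.gamma_set = drm_atomic_helper_legacy_gamma_set,
	};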
@@ -295,19 +302,15 @@ int drm_mode_gamma_get_ioctl(struct drm_device *dev,
295 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 302 if (!drm_core_check_feature(dev, DRIVER_MODESET))
296 return -EINVAL; 303 return -EINVAL;
297 304
298 drm_modeset_lock_all(dev);
299 crtc = drm_crtc_find(dev, crtc_lut->crtc_id); 305 crtc = drm_crtc_find(dev, crtc_lut->crtc_id);
300 if (!crtc) { 306 if (!crtc)
301 ret = -ENOENT; 307 return -ENOENT;
302 goto out;
303 }
304 308
305 /* memcpy into gamma store */ 309 /* memcpy into gamma store */
306 if (crtc_lut->gamma_size != crtc->gamma_size) { 310 if (crtc_lut->gamma_size != crtc->gamma_size)
307 ret = -EINVAL; 311 return -EINVAL;
308 goto out;
309 }
310 312
313 drm_modeset_lock(&crtc->mutex, NULL);
311 size = crtc_lut->gamma_size * (sizeof(uint16_t)); 314 size = crtc_lut->gamma_size * (sizeof(uint16_t));
312 r_base = crtc->gamma_store; 315 r_base = crtc->gamma_store;
313 if (copy_to_user((void __user *)(unsigned long)crtc_lut->red, r_base, size)) { 316 if (copy_to_user((void __user *)(unsigned long)crtc_lut->red, r_base, size)) {
@@ -327,6 +330,6 @@ int drm_mode_gamma_get_ioctl(struct drm_device *dev,
327 goto out; 330 goto out;
328 } 331 }
329out: 332out:
330 drm_modeset_unlock_all(dev); 333 drm_modeset_unlock(&crtc->mutex);
331 return ret; 334 return ret;
332} 335}
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index d69e180fc563..5af25ce5bf7c 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -576,6 +576,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
576 } 576 }
577 DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name); 577 DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name);
578 578
579 mutex_lock(&crtc->dev->mode_config.mutex);
579 drm_modeset_acquire_init(&ctx, 0); 580 drm_modeset_acquire_init(&ctx, 0);
580retry: 581retry:
581 ret = drm_modeset_lock_all_ctx(crtc->dev, &ctx); 582 ret = drm_modeset_lock_all_ctx(crtc->dev, &ctx);
@@ -721,6 +722,7 @@ out:
721 } 722 }
722 drm_modeset_drop_locks(&ctx); 723 drm_modeset_drop_locks(&ctx);
723 drm_modeset_acquire_fini(&ctx); 724 drm_modeset_acquire_fini(&ctx);
725 mutex_unlock(&crtc->dev->mode_config.mutex);
724 726
725 return ret; 727 return ret;
726} 728}
diff --git a/drivers/gpu/drm/drm_crtc_internal.h b/drivers/gpu/drm/drm_crtc_internal.h
index 8c04275cf226..d077c5490041 100644
--- a/drivers/gpu/drm/drm_crtc_internal.h
+++ b/drivers/gpu/drm/drm_crtc_internal.h
@@ -182,7 +182,6 @@ int drm_atomic_get_property(struct drm_mode_object *obj,
182 struct drm_property *property, uint64_t *val); 182 struct drm_property *property, uint64_t *val);
183int drm_mode_atomic_ioctl(struct drm_device *dev, 183int drm_mode_atomic_ioctl(struct drm_device *dev,
184 void *data, struct drm_file *file_priv); 184 void *data, struct drm_file *file_priv);
185int drm_atomic_remove_fb(struct drm_framebuffer *fb);
186 185
187 186
188/* drm_plane.c */ 187/* drm_plane.c */
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index e65becd964a1..a0ea3241c651 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -109,6 +109,42 @@ static DEFINE_MUTEX(kernel_fb_helper_lock);
109 for (({ lockdep_assert_held(&(fbh)->dev->mode_config.mutex); }), \ 109 for (({ lockdep_assert_held(&(fbh)->dev->mode_config.mutex); }), \
110 i__ = 0; i__ < (fbh)->connector_count; i__++) 110 i__ = 0; i__ < (fbh)->connector_count; i__++)
111 111
112int drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper,
113 struct drm_connector *connector)
114{
115 struct drm_fb_helper_connector *fb_conn;
116 struct drm_fb_helper_connector **temp;
117 unsigned int count;
118
119 if (!drm_fbdev_emulation)
120 return 0;
121
122 WARN_ON(!mutex_is_locked(&fb_helper->dev->mode_config.mutex));
123
124 count = fb_helper->connector_count + 1;
125
126 if (count > fb_helper->connector_info_alloc_count) {
127 size_t size = count * sizeof(fb_conn);
128
129 temp = krealloc(fb_helper->connector_info, size, GFP_KERNEL);
130 if (!temp)
131 return -ENOMEM;
132
133 fb_helper->connector_info_alloc_count = count;
134 fb_helper->connector_info = temp;
135 }
136
137 fb_conn = kzalloc(sizeof(*fb_conn), GFP_KERNEL);
138 if (!fb_conn)
139 return -ENOMEM;
140
141 drm_connector_get(connector);
142 fb_conn->connector = connector;
143 fb_helper->connector_info[fb_helper->connector_count++] = fb_conn;
144 return 0;
145}
146EXPORT_SYMBOL(drm_fb_helper_add_one_connector);
147
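The reflowed drm_fb_helper_add_one_connector() is behaviourally the old one with the over-long krealloc line unpacked: grow the pointer array when needed, then append. The same grow-and-append pattern in self-contained userspace C:

	#include <stdlib.h>

	struct conn;	/* opaque payload */

	/* Append to a growable pointer array; returns 0, or -1 on OOM. */
	static int add_one(struct conn ***arr, unsigned *count,
			   unsigned *alloc, struct conn *item)
	{
		unsigned want = *count + 1;

		if (want > *alloc) {
			struct conn **tmp = realloc(*arr, want * sizeof(*tmp));

			if (!tmp)
				return -1;	/* old array still valid */
			*arr = tmp;
			*alloc = want;
		}
		(*arr)[(*count)++] = item;
		return 0;
	}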
112/** 148/**
113 * drm_fb_helper_single_add_all_connectors() - add all connectors to fbdev 149 * drm_fb_helper_single_add_all_connectors() - add all connectors to fbdev
114 * emulation helper 150 * emulation helper
@@ -162,36 +198,6 @@ out:
162} 198}
163EXPORT_SYMBOL(drm_fb_helper_single_add_all_connectors); 199EXPORT_SYMBOL(drm_fb_helper_single_add_all_connectors);
164 200
165int drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper, struct drm_connector *connector)
166{
167 struct drm_fb_helper_connector **temp;
168 struct drm_fb_helper_connector *fb_helper_connector;
169
170 if (!drm_fbdev_emulation)
171 return 0;
172
173 WARN_ON(!mutex_is_locked(&fb_helper->dev->mode_config.mutex));
174 if (fb_helper->connector_count + 1 > fb_helper->connector_info_alloc_count) {
175 temp = krealloc(fb_helper->connector_info, sizeof(struct drm_fb_helper_connector *) * (fb_helper->connector_count + 1), GFP_KERNEL);
176 if (!temp)
177 return -ENOMEM;
178
179 fb_helper->connector_info_alloc_count = fb_helper->connector_count + 1;
180 fb_helper->connector_info = temp;
181 }
182
183
184 fb_helper_connector = kzalloc(sizeof(struct drm_fb_helper_connector), GFP_KERNEL);
185 if (!fb_helper_connector)
186 return -ENOMEM;
187
188 drm_connector_get(connector);
189 fb_helper_connector->connector = connector;
190 fb_helper->connector_info[fb_helper->connector_count++] = fb_helper_connector;
191 return 0;
192}
193EXPORT_SYMBOL(drm_fb_helper_add_one_connector);
194
195int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper, 201int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
196 struct drm_connector *connector) 202 struct drm_connector *connector)
197{ 203{
@@ -213,9 +219,9 @@ int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
213 fb_helper_connector = fb_helper->connector_info[i]; 219 fb_helper_connector = fb_helper->connector_info[i];
214 drm_connector_put(fb_helper_connector->connector); 220 drm_connector_put(fb_helper_connector->connector);
215 221
216 for (j = i + 1; j < fb_helper->connector_count; j++) { 222 for (j = i + 1; j < fb_helper->connector_count; j++)
217 fb_helper->connector_info[j - 1] = fb_helper->connector_info[j]; 223 fb_helper->connector_info[j - 1] = fb_helper->connector_info[j];
218 } 224
219 fb_helper->connector_count--; 225 fb_helper->connector_count--;
220 kfree(fb_helper_connector); 226 kfree(fb_helper_connector);
221 227
@@ -250,7 +256,8 @@ static void drm_fb_helper_restore_lut_atomic(struct drm_crtc *crtc)
250 g_base = r_base + crtc->gamma_size; 256 g_base = r_base + crtc->gamma_size;
251 b_base = g_base + crtc->gamma_size; 257 b_base = g_base + crtc->gamma_size;
252 258
253 crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, crtc->gamma_size); 259 crtc->funcs->gamma_set(crtc, r_base, g_base, b_base,
260 crtc->gamma_size, NULL);
254} 261}
255 262
256/** 263/**
@@ -275,6 +282,9 @@ int drm_fb_helper_debug_enter(struct fb_info *info)
275 if (funcs->mode_set_base_atomic == NULL) 282 if (funcs->mode_set_base_atomic == NULL)
276 continue; 283 continue;
277 284
285 if (drm_drv_uses_atomic_modeset(mode_set->crtc->dev))
286 continue;
287
278 drm_fb_helper_save_lut_atomic(mode_set->crtc, helper); 288 drm_fb_helper_save_lut_atomic(mode_set->crtc, helper);
279 funcs->mode_set_base_atomic(mode_set->crtc, 289 funcs->mode_set_base_atomic(mode_set->crtc,
280 mode_set->fb, 290 mode_set->fb,
@@ -316,6 +326,7 @@ int drm_fb_helper_debug_leave(struct fb_info *info)
316 326
317 for (i = 0; i < helper->crtc_count; i++) { 327 for (i = 0; i < helper->crtc_count; i++) {
318 struct drm_mode_set *mode_set = &helper->crtc_info[i].mode_set; 328 struct drm_mode_set *mode_set = &helper->crtc_info[i].mode_set;
329
319 crtc = mode_set->crtc; 330 crtc = mode_set->crtc;
320 funcs = crtc->helper_private; 331 funcs = crtc->helper_private;
321 fb = drm_mode_config_fb(crtc); 332 fb = drm_mode_config_fb(crtc);
@@ -331,6 +342,9 @@ int drm_fb_helper_debug_leave(struct fb_info *info)
331 if (funcs->mode_set_base_atomic == NULL) 342 if (funcs->mode_set_base_atomic == NULL)
332 continue; 343 continue;
333 344
345 if (drm_drv_uses_atomic_modeset(crtc->dev))
346 continue;
347
334 drm_fb_helper_restore_lut_atomic(mode_set->crtc); 348 drm_fb_helper_restore_lut_atomic(mode_set->crtc);
335 funcs->mode_set_base_atomic(mode_set->crtc, fb, crtc->x, 349 funcs->mode_set_base_atomic(mode_set->crtc, fb, crtc->x,
336 crtc->y, LEAVE_ATOMIC_MODE_SET); 350 crtc->y, LEAVE_ATOMIC_MODE_SET);
@@ -346,7 +360,7 @@ static int restore_fbdev_mode_atomic(struct drm_fb_helper *fb_helper)
346 struct drm_plane *plane; 360 struct drm_plane *plane;
347 struct drm_atomic_state *state; 361 struct drm_atomic_state *state;
348 int i, ret; 362 int i, ret;
349 unsigned plane_mask; 363 unsigned int plane_mask;
350 364
351 state = drm_atomic_state_alloc(dev); 365 state = drm_atomic_state_alloc(dev);
352 if (!state) 366 if (!state)
@@ -378,7 +392,7 @@ retry:
378 goto fail; 392 goto fail;
379 } 393 }
380 394
381 for(i = 0; i < fb_helper->crtc_count; i++) { 395 for (i = 0; i < fb_helper->crtc_count; i++) {
382 struct drm_mode_set *mode_set = &fb_helper->crtc_info[i].mode_set; 396 struct drm_mode_set *mode_set = &fb_helper->crtc_info[i].mode_set;
383 397
384 ret = __drm_atomic_helper_set_config(mode_set, state); 398 ret = __drm_atomic_helper_set_config(mode_set, state);
@@ -404,17 +418,12 @@ backoff:
404 goto retry; 418 goto retry;
405} 419}
406 420
407static int restore_fbdev_mode(struct drm_fb_helper *fb_helper) 421static int restore_fbdev_mode_legacy(struct drm_fb_helper *fb_helper)
408{ 422{
409 struct drm_device *dev = fb_helper->dev; 423 struct drm_device *dev = fb_helper->dev;
410 struct drm_plane *plane; 424 struct drm_plane *plane;
411 int i; 425 int i;
412 426
413 drm_warn_on_modeset_not_all_locked(dev);
414
415 if (drm_drv_uses_atomic_modeset(dev))
416 return restore_fbdev_mode_atomic(fb_helper);
417
418 drm_for_each_plane(plane, dev) { 427 drm_for_each_plane(plane, dev) {
419 if (plane->type != DRM_PLANE_TYPE_PRIMARY) 428 if (plane->type != DRM_PLANE_TYPE_PRIMARY)
420 drm_plane_force_disable(plane); 429 drm_plane_force_disable(plane);
@@ -448,6 +457,18 @@ static int restore_fbdev_mode(struct drm_fb_helper *fb_helper)
448 return 0; 457 return 0;
449} 458}
450 459
460static int restore_fbdev_mode(struct drm_fb_helper *fb_helper)
461{
462 struct drm_device *dev = fb_helper->dev;
463
464 drm_warn_on_modeset_not_all_locked(dev);
465
466 if (drm_drv_uses_atomic_modeset(dev))
467 return restore_fbdev_mode_atomic(fb_helper);
468 else
469 return restore_fbdev_mode_legacy(fb_helper);
470}
471
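restore_fbdev_mode() shrinks to a dispatcher: the lock assertion stays common, atomic drivers take restore_fbdev_mode_atomic(), and the legacy plane/CRTC walk is isolated in restore_fbdev_mode_legacy(). The same split is applied to pan_display further down in this file; reduced to its shape (stand-in names):

	assert_locks_held(dev);			/* common precondition */

	if (drm_drv_uses_atomic_modeset(dev))
		return restore_atomic(helper);
	else
		return restore_legacy(helper);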
451/** 472/**
452 * drm_fb_helper_restore_fbdev_mode_unlocked - restore fbdev configuration 473 * drm_fb_helper_restore_fbdev_mode_unlocked - restore fbdev configuration
453 * @fb_helper: fbcon to restore 474 * @fb_helper: fbcon to restore
@@ -488,8 +509,10 @@ static bool drm_fb_helper_is_bound(struct drm_fb_helper *fb_helper)
488 struct drm_crtc *crtc; 509 struct drm_crtc *crtc;
489 int bound = 0, crtcs_bound = 0; 510 int bound = 0, crtcs_bound = 0;
490 511
491 /* Sometimes user space wants everything disabled, so don't steal the 512 /*
492 * display if there's a master. */ 513 * Sometimes user space wants everything disabled, so don't steal the
514 * display if there's a master.
515 */
493 if (READ_ONCE(dev->master)) 516 if (READ_ONCE(dev->master))
494 return false; 517 return false;
495 518
@@ -537,6 +560,7 @@ static bool drm_fb_helper_force_kernel_mode(void)
537static void drm_fb_helper_restore_work_fn(struct work_struct *ignored) 560static void drm_fb_helper_restore_work_fn(struct work_struct *ignored)
538{ 561{
539 bool ret; 562 bool ret;
563
540 ret = drm_fb_helper_force_kernel_mode(); 564 ret = drm_fb_helper_force_kernel_mode();
541 if (ret == true) 565 if (ret == true)
542 DRM_ERROR("Failed to restore crtc configuration\n"); 566 DRM_ERROR("Failed to restore crtc configuration\n");
@@ -870,9 +894,8 @@ void drm_fb_helper_fini(struct drm_fb_helper *fb_helper)
870 mutex_lock(&kernel_fb_helper_lock); 894 mutex_lock(&kernel_fb_helper_lock);
871 if (!list_empty(&fb_helper->kernel_fb_list)) { 895 if (!list_empty(&fb_helper->kernel_fb_list)) {
872 list_del(&fb_helper->kernel_fb_list); 896 list_del(&fb_helper->kernel_fb_list);
873 if (list_empty(&kernel_fb_helper_list)) { 897 if (list_empty(&kernel_fb_helper_list))
874 unregister_sysrq_key('v', &sysrq_drm_fb_helper_restore_op); 898 unregister_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
875 }
876 } 899 }
877 mutex_unlock(&kernel_fb_helper_lock); 900 mutex_unlock(&kernel_fb_helper_lock);
878 901
@@ -1165,6 +1188,7 @@ static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green,
1165 (blue << info->var.blue.offset); 1188 (blue << info->var.blue.offset);
1166 if (info->var.transp.length > 0) { 1189 if (info->var.transp.length > 0) {
1167 u32 mask = (1 << info->var.transp.length) - 1; 1190 u32 mask = (1 << info->var.transp.length) - 1;
1191
1168 mask <<= info->var.transp.offset; 1192 mask <<= info->var.transp.offset;
1169 value |= mask; 1193 value |= mask;
1170 } 1194 }
@@ -1447,7 +1471,7 @@ static int pan_display_atomic(struct fb_var_screeninfo *var,
1447 struct drm_atomic_state *state; 1471 struct drm_atomic_state *state;
1448 struct drm_plane *plane; 1472 struct drm_plane *plane;
1449 int i, ret; 1473 int i, ret;
1450 unsigned plane_mask; 1474 unsigned int plane_mask;
1451 1475
1452 state = drm_atomic_state_alloc(dev); 1476 state = drm_atomic_state_alloc(dev);
1453 if (!state) 1477 if (!state)
@@ -1456,7 +1480,7 @@ static int pan_display_atomic(struct fb_var_screeninfo *var,
1456 state->acquire_ctx = dev->mode_config.acquire_ctx; 1480 state->acquire_ctx = dev->mode_config.acquire_ctx;
1457retry: 1481retry:
1458 plane_mask = 0; 1482 plane_mask = 0;
1459 for(i = 0; i < fb_helper->crtc_count; i++) { 1483 for (i = 0; i < fb_helper->crtc_count; i++) {
1460 struct drm_mode_set *mode_set; 1484 struct drm_mode_set *mode_set;
1461 1485
1462 mode_set = &fb_helper->crtc_info[i].mode_set; 1486 mode_set = &fb_helper->crtc_info[i].mode_set;
@@ -1496,34 +1520,14 @@ backoff:
1496 goto retry; 1520 goto retry;
1497} 1521}
1498 1522
1499/** 1523static int pan_display_legacy(struct fb_var_screeninfo *var,
1500 * drm_fb_helper_pan_display - implementation for &fb_ops.fb_pan_display
1501 * @var: updated screen information
1502 * @info: fbdev registered by the helper
1503 */
1504int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
1505 struct fb_info *info) 1524 struct fb_info *info)
1506{ 1525{
1507 struct drm_fb_helper *fb_helper = info->par; 1526 struct drm_fb_helper *fb_helper = info->par;
1508 struct drm_device *dev = fb_helper->dev;
1509 struct drm_mode_set *modeset; 1527 struct drm_mode_set *modeset;
1510 int ret = 0; 1528 int ret = 0;
1511 int i; 1529 int i;
1512 1530
1513 if (oops_in_progress)
1514 return -EBUSY;
1515
1516 drm_modeset_lock_all(dev);
1517 if (!drm_fb_helper_is_bound(fb_helper)) {
1518 drm_modeset_unlock_all(dev);
1519 return -EBUSY;
1520 }
1521
1522 if (drm_drv_uses_atomic_modeset(dev)) {
1523 ret = pan_display_atomic(var, info);
1524 goto unlock;
1525 }
1526
1527 for (i = 0; i < fb_helper->crtc_count; i++) { 1531 for (i = 0; i < fb_helper->crtc_count; i++) {
1528 modeset = &fb_helper->crtc_info[i].mode_set; 1532 modeset = &fb_helper->crtc_info[i].mode_set;
1529 1533
@@ -1538,8 +1542,37 @@ int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
1538 } 1542 }
1539 } 1543 }
1540 } 1544 }
1541unlock: 1545
1546 return ret;
1547}
1548
1549/**
1550 * drm_fb_helper_pan_display - implementation for &fb_ops.fb_pan_display
1551 * @var: updated screen information
1552 * @info: fbdev registered by the helper
1553 */
1554int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
1555 struct fb_info *info)
1556{
1557 struct drm_fb_helper *fb_helper = info->par;
1558 struct drm_device *dev = fb_helper->dev;
1559 int ret;
1560
1561 if (oops_in_progress)
1562 return -EBUSY;
1563
1564 drm_modeset_lock_all(dev);
1565 if (!drm_fb_helper_is_bound(fb_helper)) {
1566 drm_modeset_unlock_all(dev);
1567 return -EBUSY;
1568 }
1569
1570 if (drm_drv_uses_atomic_modeset(dev))
1571 ret = pan_display_atomic(var, info);
1572 else
1573 ret = pan_display_legacy(var, info);
1542 drm_modeset_unlock_all(dev); 1574 drm_modeset_unlock_all(dev);
1575
1543 return ret; 1576 return ret;
1544} 1577}
1545EXPORT_SYMBOL(drm_fb_helper_pan_display); 1578EXPORT_SYMBOL(drm_fb_helper_pan_display);
@@ -1561,11 +1594,10 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
1561 memset(&sizes, 0, sizeof(struct drm_fb_helper_surface_size)); 1594 memset(&sizes, 0, sizeof(struct drm_fb_helper_surface_size));
1562 sizes.surface_depth = 24; 1595 sizes.surface_depth = 24;
1563 sizes.surface_bpp = 32; 1596 sizes.surface_bpp = 32;
1564 sizes.fb_width = (unsigned)-1; 1597 sizes.fb_width = (u32)-1;
1565 sizes.fb_height = (unsigned)-1; 1598 sizes.fb_height = (u32)-1;
1566 1599
1567 /* if driver picks 8 or 16 by default use that 1600 /* if driver picks 8 or 16 by default use that for both depth/bpp */
1568 for both depth/bpp */
1569 if (preferred_bpp != sizes.surface_bpp) 1601 if (preferred_bpp != sizes.surface_bpp)
1570 sizes.surface_depth = sizes.surface_bpp = preferred_bpp; 1602 sizes.surface_depth = sizes.surface_bpp = preferred_bpp;
1571 1603
@@ -1630,6 +1662,7 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
1630 1662
1631 for (j = 0; j < mode_set->num_connectors; j++) { 1663 for (j = 0; j < mode_set->num_connectors; j++) {
1632 struct drm_connector *connector = mode_set->connectors[j]; 1664 struct drm_connector *connector = mode_set->connectors[j];
1665
1633 if (connector->has_tile) { 1666 if (connector->has_tile) {
1634 lasth = (connector->tile_h_loc == (connector->num_h_tile - 1)); 1667 lasth = (connector->tile_h_loc == (connector->num_h_tile - 1));
1635 lastv = (connector->tile_v_loc == (connector->num_v_tile - 1)); 1668 lastv = (connector->tile_v_loc == (connector->num_v_tile - 1));
@@ -1645,8 +1678,10 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
1645 } 1678 }
1646 1679
1647 if (crtc_count == 0 || sizes.fb_width == -1 || sizes.fb_height == -1) { 1680 if (crtc_count == 0 || sizes.fb_width == -1 || sizes.fb_height == -1) {
1648 /* hmm everyone went away - assume VGA cable just fell out 1681 /*
1649 and will come back later. */ 1682 * hmm everyone went away - assume VGA cable just fell out
1683 * and will come back later.
1684 */
1650 DRM_INFO("Cannot find any crtc or sizes - going 1024x768\n"); 1685 DRM_INFO("Cannot find any crtc or sizes - going 1024x768\n");
1651 sizes.fb_width = sizes.surface_width = 1024; 1686 sizes.fb_width = sizes.surface_width = 1024;
1652 sizes.fb_height = sizes.surface_height = 768; 1687 sizes.fb_height = sizes.surface_height = 768;
@@ -1703,7 +1738,6 @@ void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
1703 info->fix.accel = FB_ACCEL_NONE; 1738 info->fix.accel = FB_ACCEL_NONE;
1704 1739
1705 info->fix.line_length = pitch; 1740 info->fix.line_length = pitch;
1706 return;
1707} 1741}
1708EXPORT_SYMBOL(drm_fb_helper_fill_fix); 1742EXPORT_SYMBOL(drm_fb_helper_fill_fix);
1709 1743
@@ -1725,6 +1759,7 @@ void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helpe
1725 uint32_t fb_width, uint32_t fb_height) 1759 uint32_t fb_width, uint32_t fb_height)
1726{ 1760{
1727 struct drm_framebuffer *fb = fb_helper->fb; 1761 struct drm_framebuffer *fb = fb_helper->fb;
1762
1728 info->pseudo_palette = fb_helper->pseudo_palette; 1763 info->pseudo_palette = fb_helper->pseudo_palette;
1729 info->var.xres_virtual = fb->width; 1764 info->var.xres_virtual = fb->width;
1730 info->var.yres_virtual = fb->height; 1765 info->var.yres_virtual = fb->height;
@@ -2057,13 +2092,15 @@ retry:
2057 continue; 2092 continue;
2058 2093
2059 } else { 2094 } else {
2060 if (fb_helper_conn->connector->tile_h_loc != tile_pass -1 && 2095 if (fb_helper_conn->connector->tile_h_loc != tile_pass - 1 &&
2061 fb_helper_conn->connector->tile_v_loc != tile_pass - 1) 2096 fb_helper_conn->connector->tile_v_loc != tile_pass - 1)
2062 /* if this tile_pass doesn't cover any of the tiles - keep going */ 2097 /* if this tile_pass doesn't cover any of the tiles - keep going */
2063 continue; 2098 continue;
2064 2099
2065 /* find the tile offsets for this pass - need 2100 /*
2066 to find all tiles left and above */ 2101 * find the tile offsets for this pass - need to find
2102 * all tiles left and above
2103 */
2067 drm_get_tile_offsets(fb_helper, modes, offsets, 2104 drm_get_tile_offsets(fb_helper, modes, offsets,
2068 i, fb_helper_conn->connector->tile_h_loc, fb_helper_conn->connector->tile_v_loc); 2105 i, fb_helper_conn->connector->tile_h_loc, fb_helper_conn->connector->tile_v_loc);
2069 } 2106 }
@@ -2147,8 +2184,10 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
2147 if (!encoder) 2184 if (!encoder)
2148 goto out; 2185 goto out;
2149 2186
2150 /* select a crtc for this connector and then attempt to configure 2187 /*
2151 remaining connectors */ 2188 * select a crtc for this connector and then attempt to configure
2189 * remaining connectors
2190 */
2152 for (c = 0; c < fb_helper->crtc_count; c++) { 2191 for (c = 0; c < fb_helper->crtc_count; c++) {
2153 crtc = &fb_helper->crtc_info[c]; 2192 crtc = &fb_helper->crtc_info[c];
2154 2193
diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c
index e8f9c13a0afd..fc8ef42203ec 100644
--- a/drivers/gpu/drm/drm_framebuffer.c
+++ b/drivers/gpu/drm/drm_framebuffer.c
@@ -24,6 +24,7 @@
24#include <drm/drmP.h> 24#include <drm/drmP.h>
25#include <drm/drm_auth.h> 25#include <drm/drm_auth.h>
26#include <drm/drm_framebuffer.h> 26#include <drm/drm_framebuffer.h>
27#include <drm/drm_atomic.h>
27 28
28#include "drm_crtc_internal.h" 29#include "drm_crtc_internal.h"
29 30
@@ -755,6 +756,117 @@ void drm_framebuffer_cleanup(struct drm_framebuffer *fb)
755} 756}
756EXPORT_SYMBOL(drm_framebuffer_cleanup); 757EXPORT_SYMBOL(drm_framebuffer_cleanup);
757 758
759static int atomic_remove_fb(struct drm_framebuffer *fb)
760{
761 struct drm_modeset_acquire_ctx ctx;
762 struct drm_device *dev = fb->dev;
763 struct drm_atomic_state *state;
764 struct drm_plane *plane;
765 struct drm_connector *conn;
766 struct drm_connector_state *conn_state;
767 int i, ret = 0;
768 unsigned plane_mask;
769
770 state = drm_atomic_state_alloc(dev);
771 if (!state)
772 return -ENOMEM;
773
774 drm_modeset_acquire_init(&ctx, 0);
775 state->acquire_ctx = &ctx;
776
777retry:
778 plane_mask = 0;
779 ret = drm_modeset_lock_all_ctx(dev, &ctx);
780 if (ret)
781 goto unlock;
782
783 drm_for_each_plane(plane, dev) {
784 struct drm_plane_state *plane_state;
785
786 if (plane->state->fb != fb)
787 continue;
788
789 plane_state = drm_atomic_get_plane_state(state, plane);
790 if (IS_ERR(plane_state)) {
791 ret = PTR_ERR(plane_state);
792 goto unlock;
793 }
794
795 if (plane_state->crtc->primary == plane) {
796 struct drm_crtc_state *crtc_state;
797
798 crtc_state = drm_atomic_get_existing_crtc_state(state, plane_state->crtc);
799
800 ret = drm_atomic_add_affected_connectors(state, plane_state->crtc);
801 if (ret)
802 goto unlock;
803
804 crtc_state->active = false;
805 ret = drm_atomic_set_mode_for_crtc(crtc_state, NULL);
806 if (ret)
807 goto unlock;
808 }
809
810 drm_atomic_set_fb_for_plane(plane_state, NULL);
811 ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
812 if (ret)
813 goto unlock;
814
815 plane_mask |= BIT(drm_plane_index(plane));
816
817 plane->old_fb = plane->fb;
818 }
819
820 for_each_connector_in_state(state, conn, conn_state, i) {
821 ret = drm_atomic_set_crtc_for_connector(conn_state, NULL);
822
823 if (ret)
824 goto unlock;
825 }
826
827 if (plane_mask)
828 ret = drm_atomic_commit(state);
829
830unlock:
831 if (plane_mask)
832 drm_atomic_clean_old_fb(dev, plane_mask, ret);
833
834 if (ret == -EDEADLK) {
835 drm_modeset_backoff(&ctx);
836 goto retry;
837 }
838
839 drm_atomic_state_put(state);
840
841 drm_modeset_drop_locks(&ctx);
842 drm_modeset_acquire_fini(&ctx);
843
844 return ret;
845}
846
847static void legacy_remove_fb(struct drm_framebuffer *fb)
848{
849 struct drm_device *dev = fb->dev;
850 struct drm_crtc *crtc;
851 struct drm_plane *plane;
852
853 drm_modeset_lock_all(dev);
854 /* remove from any CRTC */
855 drm_for_each_crtc(crtc, dev) {
856 if (crtc->primary->fb == fb) {
857 /* should turn off the crtc */
858 if (drm_crtc_force_disable(crtc))
859 DRM_ERROR("failed to reset crtc %p when fb was deleted\n", crtc);
860 }
861 }
862
863 drm_for_each_plane(plane, dev) {
864 if (plane->fb == fb)
865 drm_plane_force_disable(plane);
866 }
867 drm_modeset_unlock_all(dev);
868}
869
758/** 870/**
759 * drm_framebuffer_remove - remove and unreference a framebuffer object 871 * drm_framebuffer_remove - remove and unreference a framebuffer object
760 * @fb: framebuffer to remove 872 * @fb: framebuffer to remove
@@ -770,8 +882,6 @@ EXPORT_SYMBOL(drm_framebuffer_cleanup);
770void drm_framebuffer_remove(struct drm_framebuffer *fb) 882void drm_framebuffer_remove(struct drm_framebuffer *fb)
771{ 883{
772 struct drm_device *dev; 884 struct drm_device *dev;
773 struct drm_crtc *crtc;
774 struct drm_plane *plane;
775 885
776 if (!fb) 886 if (!fb)
777 return; 887 return;
@@ -797,29 +907,12 @@ void drm_framebuffer_remove(struct drm_framebuffer *fb)
797 */ 907 */
798 if (drm_framebuffer_read_refcount(fb) > 1) { 908 if (drm_framebuffer_read_refcount(fb) > 1) {
799 if (drm_drv_uses_atomic_modeset(dev)) { 909 if (drm_drv_uses_atomic_modeset(dev)) {
800 int ret = drm_atomic_remove_fb(fb); 910 int ret = atomic_remove_fb(fb);
801 WARN(ret, "atomic remove_fb failed with %i\n", ret); 911 WARN(ret, "atomic remove_fb failed with %i\n", ret);
802 goto out; 912 } else
803 } 913 legacy_remove_fb(fb);
804
805 drm_modeset_lock_all(dev);
806 /* remove from any CRTC */
807 drm_for_each_crtc(crtc, dev) {
808 if (crtc->primary->fb == fb) {
809 /* should turn off the crtc */
810 if (drm_crtc_force_disable(crtc))
811 DRM_ERROR("failed to reset crtc %p when fb was deleted\n", crtc);
812 }
813 }
814
815 drm_for_each_plane(plane, dev) {
816 if (plane->fb == fb)
817 drm_plane_force_disable(plane);
818 }
819 drm_modeset_unlock_all(dev);
820 } 914 }
821 915
822out:
823 drm_framebuffer_put(fb); 916 drm_framebuffer_put(fb);
824} 917}
825EXPORT_SYMBOL(drm_framebuffer_remove); 918EXPORT_SYMBOL(drm_framebuffer_remove);
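The atomic removal path added above is built on the w/w-mutex acquire context. The deadlock/backoff dance it performs is the standard shape for any atomic update; a minimal sketch of the pattern (update body elided, commit error handling omitted)::

    struct drm_modeset_acquire_ctx ctx;
    int ret;

    drm_modeset_acquire_init(&ctx, 0);
    retry:
    ret = drm_modeset_lock_all_ctx(dev, &ctx);
    if (ret == -EDEADLK) {
        /* another thread holds a lock we need: drop everything,
         * block on the contended lock, then start over */
        drm_modeset_backoff(&ctx);
        goto retry;
    }

    /* ... build and commit the atomic state ... */

    drm_modeset_drop_locks(&ctx);
    drm_modeset_acquire_fini(&ctx);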
diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
index b134482f4022..ae386783e3ea 100644
--- a/drivers/gpu/drm/drm_ioc32.c
+++ b/drivers/gpu/drm/drm_ioc32.c
@@ -1,4 +1,4 @@
1/** 1/*
2 * \file drm_ioc32.c 2 * \file drm_ioc32.c
3 * 3 *
4 * 32-bit ioctl compatibility routines for the DRM. 4 * 32-bit ioctl compatibility routines for the DRM.
@@ -72,15 +72,15 @@
72#define DRM_IOCTL_MODE_ADDFB232 DRM_IOWR(0xb8, drm_mode_fb_cmd232_t) 72#define DRM_IOCTL_MODE_ADDFB232 DRM_IOWR(0xb8, drm_mode_fb_cmd232_t)
73 73
74typedef struct drm_version_32 { 74typedef struct drm_version_32 {
75 int version_major; /**< Major version */ 75 int version_major; /* Major version */
76 int version_minor; /**< Minor version */ 76 int version_minor; /* Minor version */
77 int version_patchlevel; /**< Patch level */ 77 int version_patchlevel; /* Patch level */
78 u32 name_len; /**< Length of name buffer */ 78 u32 name_len; /* Length of name buffer */
79 u32 name; /**< Name of driver */ 79 u32 name; /* Name of driver */
80 u32 date_len; /**< Length of date buffer */ 80 u32 date_len; /* Length of date buffer */
81 u32 date; /**< User-space buffer to hold date */ 81 u32 date; /* User-space buffer to hold date */
82 u32 desc_len; /**< Length of desc buffer */ 82 u32 desc_len; /* Length of desc buffer */
83 u32 desc; /**< User-space buffer to hold desc */ 83 u32 desc; /* User-space buffer to hold desc */
84} drm_version32_t; 84} drm_version32_t;
85 85
86static int compat_drm_version(struct file *file, unsigned int cmd, 86static int compat_drm_version(struct file *file, unsigned int cmd,
@@ -126,8 +126,8 @@ static int compat_drm_version(struct file *file, unsigned int cmd,
126} 126}
127 127
128typedef struct drm_unique32 { 128typedef struct drm_unique32 {
129 u32 unique_len; /**< Length of unique */ 129 u32 unique_len; /* Length of unique */
130 u32 unique; /**< Unique name for driver instantiation */ 130 u32 unique; /* Unique name for driver instantiation */
131} drm_unique32_t; 131} drm_unique32_t;
132 132
133static int compat_drm_getunique(struct file *file, unsigned int cmd, 133static int compat_drm_getunique(struct file *file, unsigned int cmd,
@@ -180,12 +180,12 @@ static int compat_drm_setunique(struct file *file, unsigned int cmd,
180} 180}
181 181
182typedef struct drm_map32 { 182typedef struct drm_map32 {
183 u32 offset; /**< Requested physical address (0 for SAREA)*/ 183 u32 offset; /* Requested physical address (0 for SAREA) */
184 u32 size; /**< Requested physical size (bytes) */ 184 u32 size; /* Requested physical size (bytes) */
185 enum drm_map_type type; /**< Type of memory to map */ 185 enum drm_map_type type; /* Type of memory to map */
186 enum drm_map_flags flags; /**< Flags */ 186 enum drm_map_flags flags; /* Flags */
187 u32 handle; /**< User-space: "Handle" to pass to mmap() */ 187 u32 handle; /* User-space: "Handle" to pass to mmap() */
188 int mtrr; /**< MTRR slot used */ 188 int mtrr; /* MTRR slot used */
189} drm_map32_t; 189} drm_map32_t;
190 190
191static int compat_drm_getmap(struct file *file, unsigned int cmd, 191static int compat_drm_getmap(struct file *file, unsigned int cmd,
@@ -286,12 +286,12 @@ static int compat_drm_rmmap(struct file *file, unsigned int cmd,
286} 286}
287 287
288typedef struct drm_client32 { 288typedef struct drm_client32 {
289 int idx; /**< Which client desired? */ 289 int idx; /* Which client desired? */
290 int auth; /**< Is client authenticated? */ 290 int auth; /* Is client authenticated? */
291 u32 pid; /**< Process ID */ 291 u32 pid; /* Process ID */
292 u32 uid; /**< User ID */ 292 u32 uid; /* User ID */
293 u32 magic; /**< Magic */ 293 u32 magic; /* Magic */
294 u32 iocs; /**< Ioctl count */ 294 u32 iocs; /* Ioctl count */
295} drm_client32_t; 295} drm_client32_t;
296 296
297static int compat_drm_getclient(struct file *file, unsigned int cmd, 297static int compat_drm_getclient(struct file *file, unsigned int cmd,
@@ -366,12 +366,12 @@ static int compat_drm_getstats(struct file *file, unsigned int cmd,
366} 366}
367 367
368typedef struct drm_buf_desc32 { 368typedef struct drm_buf_desc32 {
369 int count; /**< Number of buffers of this size */ 369 int count; /* Number of buffers of this size */
370 int size; /**< Size in bytes */ 370 int size; /* Size in bytes */
371 int low_mark; /**< Low water mark */ 371 int low_mark; /* Low water mark */
372 int high_mark; /**< High water mark */ 372 int high_mark; /* High water mark */
373 int flags; 373 int flags;
374 u32 agp_start; /**< Start address in the AGP aperture */ 374 u32 agp_start; /* Start address in the AGP aperture */
375} drm_buf_desc32_t; 375} drm_buf_desc32_t;
376 376
377static int compat_drm_addbufs(struct file *file, unsigned int cmd, 377static int compat_drm_addbufs(struct file *file, unsigned int cmd,
@@ -1111,13 +1111,18 @@ static drm_ioctl_compat_t *drm_compat_ioctls[] = {
1111}; 1111};
1112 1112
1113/** 1113/**
1114 * Called whenever a 32-bit process running under a 64-bit kernel 1114 * drm_compat_ioctl - 32bit IOCTL compatibility handler for DRM drivers
1115 * performs an ioctl on /dev/drm. 1115 * @filp: file this ioctl is called on
1116 * @cmd: ioctl cmd number
1117 * @arg: user argument
1118 *
1119 * Compatibility handler for 32 bit userspace running on 64 kernels. All actual
1120 * IOCTL handling is forwarded to drm_ioctl(), while marshalling structures as
 1119 * IOCTL handling is forwarded to drm_ioctl(), while marshalling structures as 1121 * appropriate. Note that this only handles DRM core IOCTLs; if the driver has
 1122 * botched IOCTLs itself, it must handle those by wrapping this function.
1116 * 1123 *
1117 * \param file_priv DRM file private. 1124 * Returns:
1118 * \param cmd command. 1125 * Zero on success, negative error code on failure.
1119 * \param arg user argument.
1120 * \return zero on success or negative number on failure.
1121 */ 1126 */
1122long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) 1127long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1123{ 1128{
@@ -1141,5 +1146,4 @@ long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1141 1146
1142 return ret; 1147 return ret;
1143} 1148}
1144
1145EXPORT_SYMBOL(drm_compat_ioctl); 1149EXPORT_SYMBOL(drm_compat_ioctl);
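For drivers whose private IOCTLs are already 64-bit clean, the compat handler can be wired straight into the file operations; a sketch with a hypothetical my_driver_fops::

    static const struct file_operations my_driver_fops = {
        .owner          = THIS_MODULE,
        .open           = drm_open,
        .release        = drm_release,
        .unlocked_ioctl = drm_ioctl,
    #ifdef CONFIG_COMPAT
        .compat_ioctl   = drm_compat_ioctl,
    #endif
        .poll           = drm_poll,
        .read           = drm_read,
        .llseek         = noop_llseek,
    };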
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 7d6deaa91281..865e3ee4d743 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -286,6 +286,9 @@ static int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_
286 case DRM_CAP_ADDFB2_MODIFIERS: 286 case DRM_CAP_ADDFB2_MODIFIERS:
287 req->value = dev->mode_config.allow_fb_modifiers; 287 req->value = dev->mode_config.allow_fb_modifiers;
288 break; 288 break;
289 case DRM_CAP_CRTC_IN_VBLANK_EVENT:
290 req->value = 1;
291 break;
289 default: 292 default:
290 return -EINVAL; 293 return -EINVAL;
291 } 294 }
@@ -647,13 +650,59 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
647#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls ) 650#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls )
648 651
649/** 652/**
653 * DOC: driver specific ioctls
654 *
655 * First things first, driver private IOCTLs should only be needed for drivers
656 * supporting rendering. Kernel modesetting is all standardized, and extended
657 * through properties. There are a few exceptions in some existing drivers,
 658 * which define IOCTLs for use by the display DRM master, but they all predate
659 * properties.
660 *
661 * Now if you do have a render driver you always have to support it through
 662 * driver private IOCTLs. There are a few steps needed to wire all the things
663 * up.
664 *
665 * First you need to define the structure for your IOCTL in your driver private
666 * UAPI header in ``include/uapi/drm/my_driver_drm.h``::
667 *
668 * struct my_driver_operation {
669 * u32 some_thing;
670 * u32 another_thing;
671 * };
672 *
673 * Please make sure that you follow all the best practices from
674 * ``Documentation/ioctl/botching-up-ioctls.txt``. Note that drm_ioctl()
675 * automatically zero-extends structures, hence make sure you can add more stuff
676 * at the end, i.e. don't put a variable sized array there.
677 *
678 * Then you need to define your IOCTL number, using one of DRM_IO(), DRM_IOR(),
679 * DRM_IOW() or DRM_IOWR(). It must start with the DRM_IOCTL\_ prefix::
680 *
 681 * #define DRM_IOCTL_MY_DRIVER_OPERATION \
682 * DRM_IOW(DRM_COMMAND_BASE, struct my_driver_operation)
683 *
 684 * DRM driver private IOCTLs must be in the range from DRM_COMMAND_BASE to
685 * DRM_COMMAND_END. Finally you need an array of &struct drm_ioctl_desc to wire
686 * up the handlers and set the access rights:
687 *
688 * static const struct drm_ioctl_desc my_driver_ioctls[] = {
689 * DRM_IOCTL_DEF_DRV(MY_DRIVER_OPERATION, my_driver_operation,
690 * DRM_AUTH|DRM_RENDER_ALLOW),
691 * };
692 *
693 * And then assign this to the &drm_driver.ioctls field in your driver
694 * structure.
695 */
696
697/**
650 * drm_ioctl - ioctl callback implementation for DRM drivers 698 * drm_ioctl - ioctl callback implementation for DRM drivers
651 * @filp: file this ioctl is called on 699 * @filp: file this ioctl is called on
652 * @cmd: ioctl cmd number 700 * @cmd: ioctl cmd number
653 * @arg: user argument 701 * @arg: user argument
654 * 702 *
655 * Looks up the ioctl function in the ::ioctls table, checking for root 703 * Looks up the ioctl function in the DRM core and the driver dispatch table,
656 * previleges if so required, and dispatches to the respective function. 704 * stored in &drm_driver.ioctls. It checks for necessary permission by calling
705 * drm_ioctl_permit(), and dispatches to the respective function.
657 * 706 *
658 * Returns: 707 * Returns:
659 * Zero on success, negative error code on failure. 708 * Zero on success, negative error code on failure.
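Putting the snippets from the DOC comment together, the full wiring for a hypothetical my_driver looks roughly like this (do_operation() is a made-up helper; note the fixed-size __u32 types in the uapi struct)::

    /* include/uapi/drm/my_driver_drm.h */
    struct my_driver_operation {
        __u32 some_thing;
        __u32 another_thing;
    };

    #define DRM_MY_DRIVER_OPERATION       0x00
    #define DRM_IOCTL_MY_DRIVER_OPERATION \
        DRM_IOW(DRM_COMMAND_BASE + DRM_MY_DRIVER_OPERATION, \
                struct my_driver_operation)

    /* driver side: drm_ioctl() has already copied @data in (and will
     * copy it back out), zero-extending anything userspace left off */
    static int my_driver_operation(struct drm_device *dev, void *data,
                                   struct drm_file *file_priv)
    {
        struct my_driver_operation *args = data;

        return do_operation(dev, args->some_thing, args->another_thing);
    }

    static const struct drm_ioctl_desc my_driver_ioctls[] = {
        DRM_IOCTL_DEF_DRV(MY_DRIVER_OPERATION, my_driver_operation,
                          DRM_AUTH | DRM_RENDER_ALLOW),
    };

and then, in the driver structure, .ioctls = my_driver_ioctls together with .num_ioctls = ARRAY_SIZE(my_driver_ioctls).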
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index dac1b2593cb1..8c866cac62dd 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -1026,6 +1026,7 @@ void drm_crtc_arm_vblank_event(struct drm_crtc *crtc,
1026 1026
1027 e->pipe = pipe; 1027 e->pipe = pipe;
1028 e->event.sequence = drm_vblank_count(dev, pipe); 1028 e->event.sequence = drm_vblank_count(dev, pipe);
1029 e->event.crtc_id = crtc->base.id;
1029 list_add_tail(&e->base.link, &dev->vblank_event_list); 1030 list_add_tail(&e->base.link, &dev->vblank_event_list);
1030} 1031}
1031EXPORT_SYMBOL(drm_crtc_arm_vblank_event); 1032EXPORT_SYMBOL(drm_crtc_arm_vblank_event);
@@ -1056,6 +1057,7 @@ void drm_crtc_send_vblank_event(struct drm_crtc *crtc,
1056 now = get_drm_timestamp(); 1057 now = get_drm_timestamp();
1057 } 1058 }
1058 e->pipe = pipe; 1059 e->pipe = pipe;
1060 e->event.crtc_id = crtc->base.id;
1059 send_vblank_event(dev, e, seq, &now); 1061 send_vblank_event(dev, e, seq, &now);
1060} 1062}
1061EXPORT_SYMBOL(drm_crtc_send_vblank_event); 1063EXPORT_SYMBOL(drm_crtc_send_vblank_event);
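On the userspace side, the new DRM_CAP_CRTC_IN_VBLANK_EVENT capability gates whether the crtc_id field of a received event is meaningful; a minimal sketch using libdrm's drmGetCap() (single-event read for brevity, real code walks a buffer of events, and the capability requires current uapi headers)::

    #include <stdio.h>
    #include <stdint.h>
    #include <unistd.h>
    #include <xf86drm.h>

    static void report_one_vblank(int fd)
    {
        struct drm_event_vblank ev;
        uint64_t cap = 0;

        if (drmGetCap(fd, DRM_CAP_CRTC_IN_VBLANK_EVENT, &cap) || !cap)
            return; /* kernel predates per-CRTC vblank events */

        if (read(fd, &ev, sizeof(ev)) == sizeof(ev) &&
            ev.base.type == DRM_EVENT_VBLANK)
            printf("vblank on CRTC %u, seq %u\n", ev.crtc_id, ev.sequence);
    }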
diff --git a/drivers/gpu/drm/drm_modeset_lock.c b/drivers/gpu/drm/drm_modeset_lock.c
index bf60f2645e55..64ef09a6cccb 100644
--- a/drivers/gpu/drm/drm_modeset_lock.c
+++ b/drivers/gpu/drm/drm_modeset_lock.c
@@ -149,108 +149,6 @@ void drm_modeset_unlock_all(struct drm_device *dev)
149EXPORT_SYMBOL(drm_modeset_unlock_all); 149EXPORT_SYMBOL(drm_modeset_unlock_all);
150 150
151/** 151/**
152 * drm_modeset_lock_crtc - lock crtc with hidden acquire ctx for a plane update
153 * @crtc: DRM CRTC
154 * @plane: DRM plane to be updated on @crtc
155 *
156 * This function locks the given crtc and plane (which should be either the
157 * primary or cursor plane) using a hidden acquire context. This is necessary so
158 * that drivers internally using the atomic interfaces can grab further locks
159 * with the lock acquire context.
160 *
161 * Note that @plane can be NULL, e.g. when the cursor support hasn't yet been
162 * converted to universal planes yet.
163 */
164void drm_modeset_lock_crtc(struct drm_crtc *crtc,
165 struct drm_plane *plane)
166{
167 struct drm_modeset_acquire_ctx *ctx;
168 int ret;
169
170 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
171 if (WARN_ON(!ctx))
172 return;
173
174 drm_modeset_acquire_init(ctx, 0);
175
176retry:
177 ret = drm_modeset_lock(&crtc->mutex, ctx);
178 if (ret)
179 goto fail;
180
181 if (plane) {
182 ret = drm_modeset_lock(&plane->mutex, ctx);
183 if (ret)
184 goto fail;
185
186 if (plane->crtc) {
187 ret = drm_modeset_lock(&plane->crtc->mutex, ctx);
188 if (ret)
189 goto fail;
190 }
191 }
192
193 WARN_ON(crtc->acquire_ctx);
194
195 /* now we hold the locks, so now that it is safe, stash the
196 * ctx for drm_modeset_unlock_crtc():
197 */
198 crtc->acquire_ctx = ctx;
199
200 return;
201
202fail:
203 if (ret == -EDEADLK) {
204 drm_modeset_backoff(ctx);
205 goto retry;
206 }
207}
208EXPORT_SYMBOL(drm_modeset_lock_crtc);
209
210/**
211 * drm_modeset_legacy_acquire_ctx - find acquire ctx for legacy ioctls
212 * @crtc: drm crtc
213 *
214 * Legacy ioctl operations like cursor updates or page flips only have per-crtc
215 * locking, and store the acquire ctx in the corresponding crtc. All other
216 * legacy operations take all locks and use a global acquire context. This
217 * function grabs the right one.
218 */
219struct drm_modeset_acquire_ctx *
220drm_modeset_legacy_acquire_ctx(struct drm_crtc *crtc)
221{
222 if (crtc->acquire_ctx)
223 return crtc->acquire_ctx;
224
225 WARN_ON(!crtc->dev->mode_config.acquire_ctx);
226
227 return crtc->dev->mode_config.acquire_ctx;
228}
229EXPORT_SYMBOL(drm_modeset_legacy_acquire_ctx);
230
231/**
232 * drm_modeset_unlock_crtc - drop crtc lock
233 * @crtc: drm crtc
234 *
235 * This drops the crtc lock acquire with drm_modeset_lock_crtc() and all other
236 * locks acquired through the hidden context.
237 */
238void drm_modeset_unlock_crtc(struct drm_crtc *crtc)
239{
240 struct drm_modeset_acquire_ctx *ctx = crtc->acquire_ctx;
241
242 if (WARN_ON(!ctx))
243 return;
244
245 crtc->acquire_ctx = NULL;
246 drm_modeset_drop_locks(ctx);
247 drm_modeset_acquire_fini(ctx);
248
249 kfree(ctx);
250}
251EXPORT_SYMBOL(drm_modeset_unlock_crtc);
252
253/**
254 * drm_warn_on_modeset_not_all_locked - check that all modeset locks are locked 152 * drm_warn_on_modeset_not_all_locked - check that all modeset locks are locked
255 * @dev: device 153 * @dev: device
256 * 154 *
diff --git a/drivers/gpu/drm/drm_of.c b/drivers/gpu/drm/drm_of.c
index b5f2f0fece99..2120f33bdf4a 100644
--- a/drivers/gpu/drm/drm_of.c
+++ b/drivers/gpu/drm/drm_of.c
@@ -3,8 +3,10 @@
3#include <linux/list.h> 3#include <linux/list.h>
4#include <linux/of_graph.h> 4#include <linux/of_graph.h>
5#include <drm/drmP.h> 5#include <drm/drmP.h>
6#include <drm/drm_bridge.h>
6#include <drm/drm_crtc.h> 7#include <drm/drm_crtc.h>
7#include <drm/drm_encoder.h> 8#include <drm/drm_encoder.h>
9#include <drm/drm_panel.h>
8#include <drm/drm_of.h> 10#include <drm/drm_of.h>
9 11
10static void drm_release_of(struct device *dev, void *data) 12static void drm_release_of(struct device *dev, void *data)
@@ -208,3 +210,53 @@ int drm_of_encoder_active_endpoint(struct device_node *node,
208 return -EINVAL; 210 return -EINVAL;
209} 211}
210EXPORT_SYMBOL_GPL(drm_of_encoder_active_endpoint); 212EXPORT_SYMBOL_GPL(drm_of_encoder_active_endpoint);
213
214/*
215 * drm_of_find_panel_or_bridge - return connected panel or bridge device
216 * @np: device tree node containing encoder output ports
217 * @panel: pointer to hold returned drm_panel
218 * @bridge: pointer to hold returned drm_bridge
219 *
220 * Given a DT node's port and endpoint number, find the connected node and
 221 * return either the associated struct drm_panel or drm_bridge device. At least
 222 * one of @panel and @bridge must be non-NULL.
223 *
224 * Returns zero if successful, or one of the standard error codes if it fails.
225 */
226int drm_of_find_panel_or_bridge(const struct device_node *np,
227 int port, int endpoint,
228 struct drm_panel **panel,
229 struct drm_bridge **bridge)
230{
231 int ret = -EPROBE_DEFER;
232 struct device_node *remote;
233
234 if (!panel && !bridge)
235 return -EINVAL;
236
237 remote = of_graph_get_remote_node(np, port, endpoint);
238 if (!remote)
239 return -ENODEV;
240
241 if (panel) {
242 *panel = of_drm_find_panel(remote);
243 if (*panel)
244 ret = 0;
245 }
246
247 /* No panel found yet, check for a bridge next. */
248 if (bridge) {
249 if (ret) {
250 *bridge = of_drm_find_bridge(remote);
251 if (*bridge)
252 ret = 0;
253 } else {
254 *bridge = NULL;
255 }
256
257 }
258
259 of_node_put(remote);
260 return ret;
261}
262EXPORT_SYMBOL_GPL(drm_of_find_panel_or_bridge);
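A typical caller sits in an encoder driver's probe path; a sketch with made-up port/endpoint numbers (these are board specific)::

    static int my_encoder_parse_dt(struct device *dev)
    {
        struct drm_panel *panel;
        struct drm_bridge *bridge;
        int ret;

        ret = drm_of_find_panel_or_bridge(dev->of_node, 1, 0,
                                          &panel, &bridge);
        if (ret)
            /* commonly -EPROBE_DEFER until the remote driver binds */
            return ret;

        if (panel) {
            /* create a connector driving the panel */
        } else {
            /* attach the returned bridge to the encoder chain */
        }

        return 0;
    }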
diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c
index bc71aa2b7872..fedd4d60d9cd 100644
--- a/drivers/gpu/drm/drm_plane.c
+++ b/drivers/gpu/drm/drm_plane.c
@@ -620,7 +620,8 @@ int drm_mode_setplane(struct drm_device *dev, void *data,
620 620
621static int drm_mode_cursor_universal(struct drm_crtc *crtc, 621static int drm_mode_cursor_universal(struct drm_crtc *crtc,
622 struct drm_mode_cursor2 *req, 622 struct drm_mode_cursor2 *req,
623 struct drm_file *file_priv) 623 struct drm_file *file_priv,
624 struct drm_modeset_acquire_ctx *ctx)
624{ 625{
625 struct drm_device *dev = crtc->dev; 626 struct drm_device *dev = crtc->dev;
626 struct drm_framebuffer *fb = NULL; 627 struct drm_framebuffer *fb = NULL;
@@ -634,21 +635,11 @@ static int drm_mode_cursor_universal(struct drm_crtc *crtc,
634 int32_t crtc_x, crtc_y; 635 int32_t crtc_x, crtc_y;
635 uint32_t crtc_w = 0, crtc_h = 0; 636 uint32_t crtc_w = 0, crtc_h = 0;
636 uint32_t src_w = 0, src_h = 0; 637 uint32_t src_w = 0, src_h = 0;
637 struct drm_modeset_acquire_ctx ctx;
638 int ret = 0; 638 int ret = 0;
639 639
640 BUG_ON(!crtc->cursor); 640 BUG_ON(!crtc->cursor);
641 WARN_ON(crtc->cursor->crtc != crtc && crtc->cursor->crtc != NULL); 641 WARN_ON(crtc->cursor->crtc != crtc && crtc->cursor->crtc != NULL);
642 642
643 drm_modeset_acquire_init(&ctx, 0);
644retry:
645 ret = drm_modeset_lock(&crtc->mutex, &ctx);
646 if (ret)
647 goto fail;
648 ret = drm_modeset_lock(&crtc->cursor->mutex, &ctx);
649 if (ret)
650 goto fail;
651
652 /* 643 /*
653 * Obtain fb we'll be using (either new or existing) and take an extra 644 * Obtain fb we'll be using (either new or existing) and take an extra
654 * reference to it if fb != null. setplane will take care of dropping 645 * reference to it if fb != null. setplane will take care of dropping
@@ -693,7 +684,7 @@ retry:
693 */ 684 */
694 ret = __setplane_internal(crtc->cursor, crtc, fb, 685 ret = __setplane_internal(crtc->cursor, crtc, fb,
695 crtc_x, crtc_y, crtc_w, crtc_h, 686 crtc_x, crtc_y, crtc_w, crtc_h,
696 0, 0, src_w, src_h, &ctx); 687 0, 0, src_w, src_h, ctx);
697 688
698 /* Update successful; save new cursor position, if necessary */ 689 /* Update successful; save new cursor position, if necessary */
699 if (ret == 0 && req->flags & DRM_MODE_CURSOR_MOVE) { 690 if (ret == 0 && req->flags & DRM_MODE_CURSOR_MOVE) {
@@ -701,15 +692,6 @@ retry:
701 crtc->cursor_y = req->y; 692 crtc->cursor_y = req->y;
702 } 693 }
703 694
704fail:
705 if (ret == -EDEADLK) {
706 drm_modeset_backoff(&ctx);
707 goto retry;
708 }
709
710 drm_modeset_drop_locks(&ctx);
711 drm_modeset_acquire_fini(&ctx);
712
713 return ret; 695 return ret;
714} 696}
715 697
@@ -718,6 +700,7 @@ static int drm_mode_cursor_common(struct drm_device *dev,
718 struct drm_file *file_priv) 700 struct drm_file *file_priv)
719{ 701{
720 struct drm_crtc *crtc; 702 struct drm_crtc *crtc;
703 struct drm_modeset_acquire_ctx ctx;
721 int ret = 0; 704 int ret = 0;
722 705
723 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 706 if (!drm_core_check_feature(dev, DRIVER_MODESET))
@@ -732,14 +715,24 @@ static int drm_mode_cursor_common(struct drm_device *dev,
732 return -ENOENT; 715 return -ENOENT;
733 } 716 }
734 717
718 drm_modeset_acquire_init(&ctx, 0);
719retry:
720 ret = drm_modeset_lock(&crtc->mutex, &ctx);
721 if (ret)
722 goto out;
735 /* 723 /*
736 * If this crtc has a universal cursor plane, call that plane's update 724 * If this crtc has a universal cursor plane, call that plane's update
737 * handler rather than using legacy cursor handlers. 725 * handler rather than using legacy cursor handlers.
738 */ 726 */
739 if (crtc->cursor) 727 if (crtc->cursor) {
740 return drm_mode_cursor_universal(crtc, req, file_priv); 728 ret = drm_modeset_lock(&crtc->cursor->mutex, &ctx);
729 if (ret)
730 goto out;
731
732 ret = drm_mode_cursor_universal(crtc, req, file_priv, &ctx);
733 goto out;
734 }
741 735
742 drm_modeset_lock_crtc(crtc, crtc->cursor);
743 if (req->flags & DRM_MODE_CURSOR_BO) { 736 if (req->flags & DRM_MODE_CURSOR_BO) {
744 if (!crtc->funcs->cursor_set && !crtc->funcs->cursor_set2) { 737 if (!crtc->funcs->cursor_set && !crtc->funcs->cursor_set2) {
745 ret = -ENXIO; 738 ret = -ENXIO;
@@ -763,7 +756,13 @@ static int drm_mode_cursor_common(struct drm_device *dev,
763 } 756 }
764 } 757 }
765out: 758out:
766 drm_modeset_unlock_crtc(crtc); 759 if (ret == -EDEADLK) {
760 drm_modeset_backoff(&ctx);
761 goto retry;
762 }
763
764 drm_modeset_drop_locks(&ctx);
765 drm_modeset_acquire_fini(&ctx);
767 766
768 return ret; 767 return ret;
769 768
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 9fb65b736a90..954eb848b5e2 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -403,10 +403,10 @@ static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = {
403 .map_dma_buf = drm_gem_map_dma_buf, 403 .map_dma_buf = drm_gem_map_dma_buf,
404 .unmap_dma_buf = drm_gem_unmap_dma_buf, 404 .unmap_dma_buf = drm_gem_unmap_dma_buf,
405 .release = drm_gem_dmabuf_release, 405 .release = drm_gem_dmabuf_release,
406 .kmap = drm_gem_dmabuf_kmap, 406 .map = drm_gem_dmabuf_kmap,
407 .kmap_atomic = drm_gem_dmabuf_kmap_atomic, 407 .map_atomic = drm_gem_dmabuf_kmap_atomic,
408 .kunmap = drm_gem_dmabuf_kunmap, 408 .unmap = drm_gem_dmabuf_kunmap,
409 .kunmap_atomic = drm_gem_dmabuf_kunmap_atomic, 409 .unmap_atomic = drm_gem_dmabuf_kunmap_atomic,
410 .mmap = drm_gem_dmabuf_mmap, 410 .mmap = drm_gem_dmabuf_mmap,
411 .vmap = drm_gem_dmabuf_vmap, 411 .vmap = drm_gem_dmabuf_vmap,
412 .vunmap = drm_gem_dmabuf_vunmap, 412 .vunmap = drm_gem_dmabuf_vunmap,
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index 85005d57bde6..1b0c14ab3fff 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -44,7 +44,7 @@
44 * 44 *
45 * This library provides some helper code for output probing. It provides an 45 * This library provides some helper code for output probing. It provides an
46 * implementation of the core &drm_connector_funcs.fill_modes interface with 46 * implementation of the core &drm_connector_funcs.fill_modes interface with
47 * drm_helper_probe_single_connector_modes. 47 * drm_helper_probe_single_connector_modes().
48 * 48 *
49 * It also provides support for polling connectors with a work item and for 49 * It also provides support for polling connectors with a work item and for
50 * generic hotplug interrupt handling where the driver doesn't or cannot keep 50 * generic hotplug interrupt handling where the driver doesn't or cannot keep
@@ -169,12 +169,73 @@ void drm_kms_helper_poll_enable(struct drm_device *dev)
169EXPORT_SYMBOL(drm_kms_helper_poll_enable); 169EXPORT_SYMBOL(drm_kms_helper_poll_enable);
170 170
171static enum drm_connector_status 171static enum drm_connector_status
172drm_connector_detect(struct drm_connector *connector, bool force) 172drm_helper_probe_detect_ctx(struct drm_connector *connector, bool force)
173{ 173{
174 return connector->funcs->detect ? 174 const struct drm_connector_helper_funcs *funcs = connector->helper_private;
175 connector->funcs->detect(connector, force) : 175 struct drm_modeset_acquire_ctx ctx;
176 connector_status_connected; 176 int ret;
177
178 drm_modeset_acquire_init(&ctx, 0);
179
180retry:
181 ret = drm_modeset_lock(&connector->dev->mode_config.connection_mutex, &ctx);
182 if (!ret) {
183 if (funcs->detect_ctx)
184 ret = funcs->detect_ctx(connector, &ctx, force);
185 else if (connector->funcs->detect)
186 ret = connector->funcs->detect(connector, force);
187 else
188 ret = connector_status_connected;
189 }
190
191 if (ret == -EDEADLK) {
192 drm_modeset_backoff(&ctx);
193 goto retry;
194 }
195
196 if (WARN_ON(ret < 0))
197 ret = connector_status_unknown;
198
199 drm_modeset_drop_locks(&ctx);
200 drm_modeset_acquire_fini(&ctx);
201
202 return ret;
203}
204
205/**
206 * drm_helper_probe_detect - probe connector status
207 * @connector: connector to probe
208 * @ctx: acquire_ctx, or NULL to let this function handle locking.
209 * @force: Whether destructive probe operations should be performed.
210 *
 211 * This function calls the detect callbacks of the connector and returns a
 212 * &drm_connector_status value. If @ctx is set, it might also return
 213 * -EDEADLK.
214 */
215int
216drm_helper_probe_detect(struct drm_connector *connector,
217 struct drm_modeset_acquire_ctx *ctx,
218 bool force)
219{
220 const struct drm_connector_helper_funcs *funcs = connector->helper_private;
221 struct drm_device *dev = connector->dev;
222 int ret;
223
224 if (!ctx)
225 return drm_helper_probe_detect_ctx(connector, force);
226
227 ret = drm_modeset_lock(&dev->mode_config.connection_mutex, ctx);
228 if (ret)
229 return ret;
230
231 if (funcs->detect_ctx)
232 return funcs->detect_ctx(connector, ctx, force);
233 else if (connector->funcs->detect)
234 return connector->funcs->detect(connector, force);
235 else
236 return connector_status_connected;
177} 237}
238EXPORT_SYMBOL(drm_helper_probe_detect);
178 239
179/** 240/**
180 * drm_helper_probe_single_connector_modes - get complete set of display modes 241 * drm_helper_probe_single_connector_modes - get complete set of display modes
@@ -239,15 +300,27 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
239 struct drm_display_mode *mode; 300 struct drm_display_mode *mode;
240 const struct drm_connector_helper_funcs *connector_funcs = 301 const struct drm_connector_helper_funcs *connector_funcs =
241 connector->helper_private; 302 connector->helper_private;
242 int count = 0; 303 int count = 0, ret;
243 int mode_flags = 0; 304 int mode_flags = 0;
244 bool verbose_prune = true; 305 bool verbose_prune = true;
245 enum drm_connector_status old_status; 306 enum drm_connector_status old_status;
307 struct drm_modeset_acquire_ctx ctx;
246 308
247 WARN_ON(!mutex_is_locked(&dev->mode_config.mutex)); 309 WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
248 310
311 drm_modeset_acquire_init(&ctx, 0);
312
249 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id, 313 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id,
250 connector->name); 314 connector->name);
315
316retry:
317 ret = drm_modeset_lock(&dev->mode_config.connection_mutex, &ctx);
318 if (ret == -EDEADLK) {
319 drm_modeset_backoff(&ctx);
320 goto retry;
321 } else
322 WARN_ON(ret < 0);
323
251 /* set all old modes to the stale state */ 324 /* set all old modes to the stale state */
252 list_for_each_entry(mode, &connector->modes, head) 325 list_for_each_entry(mode, &connector->modes, head)
253 mode->status = MODE_STALE; 326 mode->status = MODE_STALE;
@@ -263,7 +336,15 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
263 if (connector->funcs->force) 336 if (connector->funcs->force)
264 connector->funcs->force(connector); 337 connector->funcs->force(connector);
265 } else { 338 } else {
266 connector->status = drm_connector_detect(connector, true); 339 ret = drm_helper_probe_detect(connector, &ctx, true);
340
341 if (ret == -EDEADLK) {
342 drm_modeset_backoff(&ctx);
343 goto retry;
344 } else if (WARN(ret < 0, "Invalid return value %i for connector detection\n", ret))
345 ret = connector_status_unknown;
346
347 connector->status = ret;
267 } 348 }
268 349
269 /* 350 /*
@@ -355,6 +436,9 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
355prune: 436prune:
356 drm_mode_prune_invalid(dev, &connector->modes, verbose_prune); 437 drm_mode_prune_invalid(dev, &connector->modes, verbose_prune);
357 438
439 drm_modeset_drop_locks(&ctx);
440 drm_modeset_acquire_fini(&ctx);
441
358 if (list_empty(&connector->modes)) 442 if (list_empty(&connector->modes))
359 return 0; 443 return 0;
360 444
@@ -440,7 +524,7 @@ static void output_poll_execute(struct work_struct *work)
440 524
441 repoll = true; 525 repoll = true;
442 526
443 connector->status = drm_connector_detect(connector, false); 527 connector->status = drm_helper_probe_detect(connector, NULL, false);
444 if (old_status != connector->status) { 528 if (old_status != connector->status) {
445 const char *old, *new; 529 const char *old, *new;
446 530
@@ -588,7 +672,7 @@ bool drm_helper_hpd_irq_event(struct drm_device *dev)
588 672
589 old_status = connector->status; 673 old_status = connector->status;
590 674
591 connector->status = drm_connector_detect(connector, false); 675 connector->status = drm_helper_probe_detect(connector, NULL, false);
592 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n", 676 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
593 connector->base.id, 677 connector->base.id,
594 connector->name, 678 connector->name,
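For driver authors, the point of the new &drm_connector_helper_funcs.detect_ctx hook is that any extra modeset lock is taken through the passed-in context, with -EDEADLK simply returned to the helpers, which back off and retry the whole detect cycle. A sketch with hypothetical names (my_device, shared_lock, my_hw_sense())::

    static int my_detect_ctx(struct drm_connector *connector,
                             struct drm_modeset_acquire_ctx *ctx, bool force)
    {
        struct my_device *mdev = connector->dev->dev_private;
        int ret;

        /* contention surfaces as -EDEADLK and is handled by the caller */
        ret = drm_modeset_lock(&mdev->shared_lock, ctx);
        if (ret)
            return ret;

        return my_hw_sense(mdev, connector, force) ?
               connector_status_connected : connector_status_disconnected;
    }

    static const struct drm_connector_helper_funcs my_conn_helper_funcs = {
        .detect_ctx = my_detect_ctx,
        /* .get_modes, .mode_valid, ... */
    };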
diff --git a/drivers/gpu/drm/drm_property.c b/drivers/gpu/drm/drm_property.c
index b17959c3e099..3e88fa24eab3 100644
--- a/drivers/gpu/drm/drm_property.c
+++ b/drivers/gpu/drm/drm_property.c
@@ -442,8 +442,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
442 struct drm_property *property; 442 struct drm_property *property;
443 int enum_count = 0; 443 int enum_count = 0;
444 int value_count = 0; 444 int value_count = 0;
445 int ret = 0, i; 445 int i, copied;
446 int copied;
447 struct drm_property_enum *prop_enum; 446 struct drm_property_enum *prop_enum;
448 struct drm_mode_property_enum __user *enum_ptr; 447 struct drm_mode_property_enum __user *enum_ptr;
449 uint64_t __user *values_ptr; 448 uint64_t __user *values_ptr;
@@ -451,55 +450,43 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
451 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 450 if (!drm_core_check_feature(dev, DRIVER_MODESET))
452 return -EINVAL; 451 return -EINVAL;
453 452
454 drm_modeset_lock_all(dev);
455 property = drm_property_find(dev, out_resp->prop_id); 453 property = drm_property_find(dev, out_resp->prop_id);
456 if (!property) { 454 if (!property)
457 ret = -ENOENT; 455 return -ENOENT;
458 goto done;
459 }
460
461 if (drm_property_type_is(property, DRM_MODE_PROP_ENUM) ||
462 drm_property_type_is(property, DRM_MODE_PROP_BITMASK)) {
463 list_for_each_entry(prop_enum, &property->enum_list, head)
464 enum_count++;
465 }
466
467 value_count = property->num_values;
468 456
469 strncpy(out_resp->name, property->name, DRM_PROP_NAME_LEN); 457 strncpy(out_resp->name, property->name, DRM_PROP_NAME_LEN);
470 out_resp->name[DRM_PROP_NAME_LEN-1] = 0; 458 out_resp->name[DRM_PROP_NAME_LEN-1] = 0;
471 out_resp->flags = property->flags; 459 out_resp->flags = property->flags;
472 460
473 if ((out_resp->count_values >= value_count) && value_count) { 461 value_count = property->num_values;
474 values_ptr = (uint64_t __user *)(unsigned long)out_resp->values_ptr; 462 values_ptr = u64_to_user_ptr(out_resp->values_ptr);
475 for (i = 0; i < value_count; i++) { 463
476 if (copy_to_user(values_ptr + i, &property->values[i], sizeof(uint64_t))) { 464 for (i = 0; i < value_count; i++) {
477 ret = -EFAULT; 465 if (i < out_resp->count_values &&
478 goto done; 466 put_user(property->values[i], values_ptr + i)) {
479 } 467 return -EFAULT;
480 } 468 }
481 } 469 }
482 out_resp->count_values = value_count; 470 out_resp->count_values = value_count;
483 471
472 copied = 0;
473 enum_ptr = u64_to_user_ptr(out_resp->enum_blob_ptr);
474
484 if (drm_property_type_is(property, DRM_MODE_PROP_ENUM) || 475 if (drm_property_type_is(property, DRM_MODE_PROP_ENUM) ||
485 drm_property_type_is(property, DRM_MODE_PROP_BITMASK)) { 476 drm_property_type_is(property, DRM_MODE_PROP_BITMASK)) {
486 if ((out_resp->count_enum_blobs >= enum_count) && enum_count) { 477 list_for_each_entry(prop_enum, &property->enum_list, head) {
487 copied = 0; 478 enum_count++;
488 enum_ptr = (struct drm_mode_property_enum __user *)(unsigned long)out_resp->enum_blob_ptr; 479 if (out_resp->count_enum_blobs < enum_count)
489 list_for_each_entry(prop_enum, &property->enum_list, head) { 480 continue;
490 481
491 if (copy_to_user(&enum_ptr[copied].value, &prop_enum->value, sizeof(uint64_t))) { 482 if (copy_to_user(&enum_ptr[copied].value,
492 ret = -EFAULT; 483 &prop_enum->value, sizeof(uint64_t)))
493 goto done; 484 return -EFAULT;
494 } 485
495 486 if (copy_to_user(&enum_ptr[copied].name,
496 if (copy_to_user(&enum_ptr[copied].name, 487 &prop_enum->name, DRM_PROP_NAME_LEN))
497 &prop_enum->name, DRM_PROP_NAME_LEN)) { 488 return -EFAULT;
498 ret = -EFAULT; 489 copied++;
499 goto done;
500 }
501 copied++;
502 }
503 } 490 }
504 out_resp->count_enum_blobs = enum_count; 491 out_resp->count_enum_blobs = enum_count;
505 } 492 }
@@ -514,9 +501,8 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
514 */ 501 */
515 if (drm_property_type_is(property, DRM_MODE_PROP_BLOB)) 502 if (drm_property_type_is(property, DRM_MODE_PROP_BLOB))
516 out_resp->count_enum_blobs = 0; 503 out_resp->count_enum_blobs = 0;
517done: 504
518 drm_modeset_unlock_all(dev); 505 return 0;
519 return ret;
520} 506}
521 507
522static void drm_property_free_blob(struct kref *kref) 508static void drm_property_free_blob(struct kref *kref)
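The rewritten copy loop keeps this ioctl's two-pass contract intact: userspace calls once with zero counts to learn the sizes, allocates, then calls again. Roughly (error handling omitted)::

    struct drm_mode_get_property prop = { .prop_id = prop_id };

    ioctl(fd, DRM_IOCTL_MODE_GETPROPERTY, &prop);  /* fills the counts */

    prop.values_ptr = (uintptr_t)calloc(prop.count_values,
                                        sizeof(uint64_t));
    prop.enum_blob_ptr = (uintptr_t)calloc(prop.count_enum_blobs,
                                           sizeof(struct drm_mode_property_enum));

    ioctl(fd, DRM_IOCTL_MODE_GETPROPERTY, &prop);  /* copies the data */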
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index 513288b5c2f6..1c5b5ce1fd7f 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -25,6 +25,20 @@
25#define to_drm_minor(d) dev_get_drvdata(d) 25#define to_drm_minor(d) dev_get_drvdata(d)
26#define to_drm_connector(d) dev_get_drvdata(d) 26#define to_drm_connector(d) dev_get_drvdata(d)
27 27
28/**
29 * DOC: overview
30 *
31 * DRM provides very little additional support to drivers for sysfs
 32 * interactions beyond the standard device-model machinery. Drivers that want to expose
33 * additional sysfs properties and property groups can attach them at either
34 * &drm_device.dev or &drm_connector.kdev.
35 *
36 * Registration is automatically handled when calling drm_dev_register(), or
37 * drm_connector_register() in case of hot-plugged connectors. Unregistration is
38 * also automatically handled by drm_dev_unregister() and
39 * drm_connector_unregister().
40 */
41
28static struct device_type drm_sysfs_device_minor = { 42static struct device_type drm_sysfs_device_minor = {
29 .name = "drm_minor" 43 .name = "drm_minor"
30}; 44};
@@ -250,15 +264,6 @@ static const struct attribute_group *connector_dev_groups[] = {
250 NULL 264 NULL
251}; 265};
252 266
253/**
254 * drm_sysfs_connector_add - add a connector to sysfs
255 * @connector: connector to add
256 *
257 * Create a connector device in sysfs, along with its associated connector
258 * properties (so far, connection status, dpms, mode list and edid) and
259 * generate a hotplug event so userspace knows there's a new connector
260 * available.
261 */
262int drm_sysfs_connector_add(struct drm_connector *connector) 267int drm_sysfs_connector_add(struct drm_connector *connector)
263{ 268{
264 struct drm_device *dev = connector->dev; 269 struct drm_device *dev = connector->dev;
@@ -285,19 +290,6 @@ int drm_sysfs_connector_add(struct drm_connector *connector)
285 return 0; 290 return 0;
286} 291}
287 292
288/**
289 * drm_sysfs_connector_remove - remove an connector device from sysfs
290 * @connector: connector to remove
291 *
292 * Remove @connector and its associated attributes from sysfs. Note that
293 * the device model core will take care of sending the "remove" uevent
294 * at this time, so we don't need to do it.
295 *
296 * Note:
297 * This routine should only be called if the connector was previously
298 * successfully registered. If @connector hasn't been registered yet,
299 * you'll likely see a panic somewhere deep in sysfs code when called.
300 */
301void drm_sysfs_connector_remove(struct drm_connector *connector) 293void drm_sysfs_connector_remove(struct drm_connector *connector)
302{ 294{
303 if (!connector->kdev) 295 if (!connector->kdev)
@@ -333,20 +325,6 @@ static void drm_sysfs_release(struct device *dev)
333 kfree(dev); 325 kfree(dev);
334} 326}
335 327
336/**
337 * drm_sysfs_minor_alloc() - Allocate sysfs device for given minor
338 * @minor: minor to allocate sysfs device for
339 *
340 * This allocates a new sysfs device for @minor and returns it. The device is
341 * not registered nor linked. The caller has to use device_add() and
342 * device_del() to register and unregister it.
343 *
344 * Note that dev_get_drvdata() on the new device will return the minor.
345 * However, the device does not hold a ref-count to the minor nor to the
346 * underlying drm_device. This is unproblematic as long as you access the
347 * private data only in sysfs callbacks. device_del() disables those
348 * synchronously, so they cannot be called after you cleanup a minor.
349 */
350struct device *drm_sysfs_minor_alloc(struct drm_minor *minor) 328struct device *drm_sysfs_minor_alloc(struct drm_minor *minor)
351{ 329{
352 const char *minor_str; 330 const char *minor_str;
@@ -384,15 +362,13 @@ err_free:
384} 362}
385 363
386/** 364/**
387 * drm_class_device_register - Register a struct device in the drm class. 365 * drm_class_device_register - register new device with the DRM sysfs class
366 * @dev: device to register
388 * 367 *
389 * @dev: pointer to struct device to register. 368 * Registers a new &struct device within the DRM sysfs class. Essentially only
390 * 369 * used by ttm to have a place for its global settings. Drivers should never use
391 * @dev should have all relevant members pre-filled with the exception 370 * this.
392 * of the class member. In particular, the device_type member must
393 * be set.
394 */ 371 */
395
396int drm_class_device_register(struct device *dev) 372int drm_class_device_register(struct device *dev)
397{ 373{
398 if (!drm_class || IS_ERR(drm_class)) 374 if (!drm_class || IS_ERR(drm_class))
@@ -403,6 +379,14 @@ int drm_class_device_register(struct device *dev)
403} 379}
404EXPORT_SYMBOL_GPL(drm_class_device_register); 380EXPORT_SYMBOL_GPL(drm_class_device_register);
405 381
382/**
383 * drm_class_device_unregister - unregister device with the DRM sysfs class
384 * @dev: device to unregister
385 *
386 * Unregisters a &struct device from the DRM sysfs class. Essentially only used
387 * by ttm to have a place for its global settings. Drivers should never use
388 * this.
389 */
406void drm_class_device_unregister(struct device *dev) 390void drm_class_device_unregister(struct device *dev)
407{ 391{
408 return device_unregister(dev); 392 return device_unregister(dev);
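A driver wanting extra per-connector attributes would attach a standard attribute group at &drm_connector.kdev after registration; a sketch with a hypothetical read-only attribute (dev_get_drvdata() on the kdev returns the connector)::

    static ssize_t my_attr_show(struct device *kdev,
                                struct device_attribute *attr, char *buf)
    {
        struct drm_connector *connector = dev_get_drvdata(kdev);

        return snprintf(buf, PAGE_SIZE, "%u\n", connector->base.id);
    }
    static DEVICE_ATTR_RO(my_attr);

    static struct attribute *my_connector_attrs[] = {
        &dev_attr_my_attr.attr,
        NULL,
    };
    ATTRIBUTE_GROUPS(my_connector);

    /* after drm_connector_register(): */
    ret = sysfs_create_groups(&connector->kdev->kobj, my_connector_groups);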
diff --git a/drivers/gpu/drm/etnaviv/Kconfig b/drivers/gpu/drm/etnaviv/Kconfig
index cc1731c5289c..71cee4e9fefb 100644
--- a/drivers/gpu/drm/etnaviv/Kconfig
+++ b/drivers/gpu/drm/etnaviv/Kconfig
@@ -5,6 +5,7 @@ config DRM_ETNAVIV
5 depends on ARCH_MXC || ARCH_DOVE || (ARM && COMPILE_TEST) 5 depends on ARCH_MXC || ARCH_DOVE || (ARM && COMPILE_TEST)
6 depends on MMU 6 depends on MMU
7 select SHMEM 7 select SHMEM
8 select SYNC_FILE
8 select TMPFS 9 select TMPFS
9 select IOMMU_API 10 select IOMMU_API
10 select IOMMU_SUPPORT 11 select IOMMU_SUPPORT
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
index 587e45043542..5255278dde56 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
@@ -111,7 +111,7 @@ static int etnaviv_open(struct drm_device *dev, struct drm_file *file)
111 return 0; 111 return 0;
112} 112}
113 113
114static void etnaviv_preclose(struct drm_device *dev, struct drm_file *file) 114static void etnaviv_postclose(struct drm_device *dev, struct drm_file *file)
115{ 115{
116 struct etnaviv_drm_private *priv = dev->dev_private; 116 struct etnaviv_drm_private *priv = dev->dev_private;
117 struct etnaviv_file_private *ctx = file->driver_priv; 117 struct etnaviv_file_private *ctx = file->driver_priv;
@@ -488,7 +488,7 @@ static struct drm_driver etnaviv_drm_driver = {
488 DRIVER_PRIME | 488 DRIVER_PRIME |
489 DRIVER_RENDER, 489 DRIVER_RENDER,
490 .open = etnaviv_open, 490 .open = etnaviv_open,
491 .preclose = etnaviv_preclose, 491 .postclose = etnaviv_postclose,
492 .gem_free_object_unlocked = etnaviv_gem_free_object, 492 .gem_free_object_unlocked = etnaviv_gem_free_object,
493 .gem_vm_ops = &vm_ops, 493 .gem_vm_ops = &vm_ops,
494 .prime_handle_to_fd = drm_gem_prime_handle_to_fd, 494 .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
@@ -512,7 +512,7 @@ static struct drm_driver etnaviv_drm_driver = {
512 .desc = "etnaviv DRM", 512 .desc = "etnaviv DRM",
513 .date = "20151214", 513 .date = "20151214",
514 .major = 1, 514 .major = 1,
515 .minor = 0, 515 .minor = 1,
516}; 516};
517 517
518/* 518/*
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.h b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
index e63ff116a3b3..c4a091e87426 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
@@ -20,6 +20,7 @@
20#include <linux/reservation.h> 20#include <linux/reservation.h>
21#include "etnaviv_drv.h" 21#include "etnaviv_drv.h"
22 22
23struct dma_fence;
23struct etnaviv_gem_ops; 24struct etnaviv_gem_ops;
24struct etnaviv_gem_object; 25struct etnaviv_gem_object;
25 26
@@ -104,9 +105,10 @@ struct etnaviv_gem_submit {
104 struct drm_device *dev; 105 struct drm_device *dev;
105 struct etnaviv_gpu *gpu; 106 struct etnaviv_gpu *gpu;
106 struct ww_acquire_ctx ticket; 107 struct ww_acquire_ctx ticket;
107 u32 fence; 108 struct dma_fence *fence;
108 unsigned int nr_bos; 109 unsigned int nr_bos;
109 struct etnaviv_gem_submit_bo bos[0]; 110 struct etnaviv_gem_submit_bo bos[0];
111 u32 flags;
110}; 112};
111 113
112int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj, 114int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj,
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
index 726090d7a6ac..e1909429837e 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -14,7 +14,9 @@
14 * this program. If not, see <http://www.gnu.org/licenses/>. 14 * this program. If not, see <http://www.gnu.org/licenses/>.
15 */ 15 */
16 16
17#include <linux/dma-fence-array.h>
17#include <linux/reservation.h> 18#include <linux/reservation.h>
19#include <linux/sync_file.h>
18#include "etnaviv_cmdbuf.h" 20#include "etnaviv_cmdbuf.h"
19#include "etnaviv_drv.h" 21#include "etnaviv_drv.h"
20#include "etnaviv_gpu.h" 22#include "etnaviv_gpu.h"
@@ -169,8 +171,10 @@ static int submit_fence_sync(const struct etnaviv_gem_submit *submit)
169 for (i = 0; i < submit->nr_bos; i++) { 171 for (i = 0; i < submit->nr_bos; i++) {
170 struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj; 172 struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
171 bool write = submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE; 173 bool write = submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE;
 174 bool explicit = !!(submit->flags & ETNA_SUBMIT_NO_IMPLICIT);
172 175
173 ret = etnaviv_gpu_fence_sync_obj(etnaviv_obj, context, write); 176 ret = etnaviv_gpu_fence_sync_obj(etnaviv_obj, context, write,
177 explicit);
174 if (ret) 178 if (ret)
175 break; 179 break;
176 } 180 }
@@ -290,6 +294,7 @@ static void submit_cleanup(struct etnaviv_gem_submit *submit)
290 } 294 }
291 295
292 ww_acquire_fini(&submit->ticket); 296 ww_acquire_fini(&submit->ticket);
297 dma_fence_put(submit->fence);
293 kfree(submit); 298 kfree(submit);
294} 299}
295 300
@@ -303,6 +308,9 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
303 struct etnaviv_gem_submit *submit; 308 struct etnaviv_gem_submit *submit;
304 struct etnaviv_cmdbuf *cmdbuf; 309 struct etnaviv_cmdbuf *cmdbuf;
305 struct etnaviv_gpu *gpu; 310 struct etnaviv_gpu *gpu;
311 struct dma_fence *in_fence = NULL;
312 struct sync_file *sync_file = NULL;
313 int out_fence_fd = -1;
306 void *stream; 314 void *stream;
307 int ret; 315 int ret;
308 316
@@ -326,6 +334,11 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
326 return -EINVAL; 334 return -EINVAL;
327 } 335 }
328 336
337 if (args->flags & ~ETNA_SUBMIT_FLAGS) {
338 DRM_ERROR("invalid flags: 0x%x\n", args->flags);
339 return -EINVAL;
340 }
341
329 /* 342 /*
330 * Copy the command submission and bo array to kernel space in 343 * Copy the command submission and bo array to kernel space in
331 * one go, and do this outside of any locks. 344 * one go, and do this outside of any locks.
@@ -365,12 +378,22 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
365 goto err_submit_cmds; 378 goto err_submit_cmds;
366 } 379 }
367 380
381 if (args->flags & ETNA_SUBMIT_FENCE_FD_OUT) {
382 out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
383 if (out_fence_fd < 0) {
384 ret = out_fence_fd;
385 goto err_submit_cmds;
386 }
387 }
388
368 submit = submit_create(dev, gpu, args->nr_bos); 389 submit = submit_create(dev, gpu, args->nr_bos);
369 if (!submit) { 390 if (!submit) {
370 ret = -ENOMEM; 391 ret = -ENOMEM;
371 goto err_submit_cmds; 392 goto err_submit_cmds;
372 } 393 }
373 394
395 submit->flags = args->flags;
396
374 ret = submit_lookup_objects(submit, file, bos, args->nr_bos); 397 ret = submit_lookup_objects(submit, file, bos, args->nr_bos);
375 if (ret) 398 if (ret)
376 goto err_submit_objects; 399 goto err_submit_objects;
@@ -385,6 +408,24 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
385 goto err_submit_objects; 408 goto err_submit_objects;
386 } 409 }
387 410
411 if (args->flags & ETNA_SUBMIT_FENCE_FD_IN) {
412 in_fence = sync_file_get_fence(args->fence_fd);
413 if (!in_fence) {
414 ret = -EINVAL;
415 goto err_submit_objects;
416 }
417
418 /*
419 * Wait if the fence is from a foreign context, or if the fence
420 * array contains any fence from a foreign context.
421 */
422 if (!dma_fence_match_context(in_fence, gpu->fence_context)) {
423 ret = dma_fence_wait(in_fence, true);
424 if (ret)
425 goto err_submit_objects;
426 }
427 }
428
388 ret = submit_fence_sync(submit); 429 ret = submit_fence_sync(submit);
389 if (ret) 430 if (ret)
390 goto err_submit_objects; 431 goto err_submit_objects;
@@ -405,7 +446,23 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
405 if (ret == 0) 446 if (ret == 0)
406 cmdbuf = NULL; 447 cmdbuf = NULL;
407 448
408 args->fence = submit->fence; 449 if (args->flags & ETNA_SUBMIT_FENCE_FD_OUT) {
450 /*
451 * This can be improved: ideally we want to allocate the sync
452 * file before kicking off the GPU job and just attach the
453 * fence to the sync file here, eliminating the ENOMEM
454 * possibility at this stage.
455 */
456 sync_file = sync_file_create(submit->fence);
457 if (!sync_file) {
458 ret = -ENOMEM;
459 goto out;
460 }
461 fd_install(out_fence_fd, sync_file->file);
462 }
463
464 args->fence_fd = out_fence_fd;
465 args->fence = submit->fence->seqno;
409 466
410out: 467out:
411 submit_unpin_objects(submit); 468 submit_unpin_objects(submit);
@@ -419,9 +476,13 @@ out:
419 flush_workqueue(priv->wq); 476 flush_workqueue(priv->wq);
420 477
421err_submit_objects: 478err_submit_objects:
479 if (in_fence)
480 dma_fence_put(in_fence);
422 submit_cleanup(submit); 481 submit_cleanup(submit);
423 482
424err_submit_cmds: 483err_submit_cmds:
484 if (ret && (out_fence_fd >= 0))
485 put_unused_fd(out_fence_fd);
425 /* if we still own the cmdbuf */ 486 /* if we still own the cmdbuf */
426 if (cmdbuf) 487 if (cmdbuf)
427 etnaviv_cmdbuf_free(cmdbuf); 488 etnaviv_cmdbuf_free(cmdbuf);
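From userspace, the new flags look roughly like this (drmCommandWriteRead() from libdrm; bo/stream setup and error handling elided, use_out_fence() is a made-up consumer, and the flags/fence_fd fields assume current etnaviv uapi headers)::

    struct drm_etnaviv_gem_submit req = {
        /* ... pipe, exec_state, bos, relocs, stream as before ... */
        .flags    = ETNA_SUBMIT_FENCE_FD_IN | ETNA_SUBMIT_FENCE_FD_OUT,
        .fence_fd = in_fd,  /* e.g. an out-fence from another device */
    };

    if (drmCommandWriteRead(fd, DRM_ETNAVIV_GEM_SUBMIT,
                            &req, sizeof(req)) == 0)
        use_out_fence(req.fence_fd);  /* poll()able sync_file fd */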
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index 130d7d517a19..9a9c40717801 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -18,6 +18,7 @@
18#include <linux/dma-fence.h> 18#include <linux/dma-fence.h>
19#include <linux/moduleparam.h> 19#include <linux/moduleparam.h>
20#include <linux/of_device.h> 20#include <linux/of_device.h>
21#include <linux/thermal.h>
21 22
22#include "etnaviv_cmdbuf.h" 23#include "etnaviv_cmdbuf.h"
23#include "etnaviv_dump.h" 24#include "etnaviv_dump.h"
@@ -409,6 +410,17 @@ static void etnaviv_gpu_load_clock(struct etnaviv_gpu *gpu, u32 clock)
409 gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, clock); 410 gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, clock);
410} 411}
411 412
413static void etnaviv_gpu_update_clock(struct etnaviv_gpu *gpu)
414{
415 unsigned int fscale = 1 << (6 - gpu->freq_scale);
416 u32 clock;
417
418 clock = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS |
419 VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(fscale);
420
421 etnaviv_gpu_load_clock(gpu, clock);
422}
423
412static int etnaviv_hw_reset(struct etnaviv_gpu *gpu) 424static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
413{ 425{
414 u32 control, idle; 426 u32 control, idle;
@@ -426,11 +438,10 @@ static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
426 timeout = jiffies + msecs_to_jiffies(1000); 438 timeout = jiffies + msecs_to_jiffies(1000);
427 439
428 while (time_is_after_jiffies(timeout)) { 440 while (time_is_after_jiffies(timeout)) {
429 control = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS |
430 VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(0x40);
431
432 /* enable clock */ 441 /* enable clock */
433 etnaviv_gpu_load_clock(gpu, control); 442 etnaviv_gpu_update_clock(gpu);
443
444 control = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);
434 445
435 /* Wait for stable clock. Vivante's code waited for 1ms */ 446 /* Wait for stable clock. Vivante's code waited for 1ms */
436 usleep_range(1000, 10000); 447 usleep_range(1000, 10000);
@@ -490,11 +501,7 @@ static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
490 } 501 }
491 502
492 /* We rely on the GPU running, so program the clock */ 503 /* We rely on the GPU running, so program the clock */
493 control = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS | 504 etnaviv_gpu_update_clock(gpu);
494 VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(0x40);
495
496 /* enable clock */
497 etnaviv_gpu_load_clock(gpu, control);
498 505
499 return 0; 506 return 0;
500} 507}
@@ -1051,6 +1058,12 @@ static struct dma_fence *etnaviv_gpu_fence_alloc(struct etnaviv_gpu *gpu)
1051{ 1058{
1052 struct etnaviv_fence *f; 1059 struct etnaviv_fence *f;
1053 1060
1061 /*
1062 * GPU lock must already be held, otherwise fence completion order might
1063 * not match the seqno order assigned here.
1064 */
1065 lockdep_assert_held(&gpu->lock);
1066
1054 f = kzalloc(sizeof(*f), GFP_KERNEL); 1067 f = kzalloc(sizeof(*f), GFP_KERNEL);
1055 if (!f) 1068 if (!f)
1056 return NULL; 1069 return NULL;
@@ -1064,7 +1077,7 @@ static struct dma_fence *etnaviv_gpu_fence_alloc(struct etnaviv_gpu *gpu)
1064} 1077}
1065 1078
1066int etnaviv_gpu_fence_sync_obj(struct etnaviv_gem_object *etnaviv_obj, 1079int etnaviv_gpu_fence_sync_obj(struct etnaviv_gem_object *etnaviv_obj,
1067 unsigned int context, bool exclusive) 1080 unsigned int context, bool exclusive, bool explicit)
1068{ 1081{
1069 struct reservation_object *robj = etnaviv_obj->resv; 1082 struct reservation_object *robj = etnaviv_obj->resv;
1070 struct reservation_object_list *fobj; 1083 struct reservation_object_list *fobj;
@@ -1077,6 +1090,9 @@ int etnaviv_gpu_fence_sync_obj(struct etnaviv_gem_object *etnaviv_obj,
1077 return ret; 1090 return ret;
1078 } 1091 }
1079 1092
1093 if (explicit)
1094 return 0;
1095
1080 /* 1096 /*
1081 * If we have any shared fences, then the exclusive fence 1097 * If we have any shared fences, then the exclusive fence
1082 * should be ignored as it will already have been signalled. 1098 * should be ignored as it will already have been signalled.
@@ -1311,18 +1327,18 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
1311 goto out_pm_put; 1327 goto out_pm_put;
1312 } 1328 }
1313 1329
1330 mutex_lock(&gpu->lock);
1331
1314 fence = etnaviv_gpu_fence_alloc(gpu); 1332 fence = etnaviv_gpu_fence_alloc(gpu);
1315 if (!fence) { 1333 if (!fence) {
1316 event_free(gpu, event); 1334 event_free(gpu, event);
1317 ret = -ENOMEM; 1335 ret = -ENOMEM;
1318 goto out_pm_put; 1336 goto out_unlock;
1319 } 1337 }
1320 1338
1321 mutex_lock(&gpu->lock);
1322
1323 gpu->event[event].fence = fence; 1339 gpu->event[event].fence = fence;
1324 submit->fence = fence->seqno; 1340 submit->fence = dma_fence_get(fence);
1325 gpu->active_fence = submit->fence; 1341 gpu->active_fence = submit->fence->seqno;
1326 1342
1327 if (gpu->lastctx != cmdbuf->ctx) { 1343 if (gpu->lastctx != cmdbuf->ctx) {
1328 gpu->mmu->need_flush = true; 1344 gpu->mmu->need_flush = true;
@@ -1357,6 +1373,7 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
1357 hangcheck_timer_reset(gpu); 1373 hangcheck_timer_reset(gpu);
1358 ret = 0; 1374 ret = 0;
1359 1375
1376out_unlock:
1360 mutex_unlock(&gpu->lock); 1377 mutex_unlock(&gpu->lock);
1361 1378
1362out_pm_put: 1379out_pm_put:
@@ -1526,17 +1543,13 @@ static int etnaviv_gpu_hw_suspend(struct etnaviv_gpu *gpu)
1526#ifdef CONFIG_PM 1543#ifdef CONFIG_PM
1527static int etnaviv_gpu_hw_resume(struct etnaviv_gpu *gpu) 1544static int etnaviv_gpu_hw_resume(struct etnaviv_gpu *gpu)
1528{ 1545{
1529 u32 clock;
1530 int ret; 1546 int ret;
1531 1547
1532 ret = mutex_lock_killable(&gpu->lock); 1548 ret = mutex_lock_killable(&gpu->lock);
1533 if (ret) 1549 if (ret)
1534 return ret; 1550 return ret;
1535 1551
1536 clock = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS | 1552 etnaviv_gpu_update_clock(gpu);
1537 VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(0x40);
1538
1539 etnaviv_gpu_load_clock(gpu, clock);
1540 etnaviv_gpu_hw_init(gpu); 1553 etnaviv_gpu_hw_init(gpu);
1541 1554
1542 gpu->switch_context = true; 1555 gpu->switch_context = true;
@@ -1548,6 +1561,47 @@ static int etnaviv_gpu_hw_resume(struct etnaviv_gpu *gpu)
1548} 1561}
1549#endif 1562#endif
1550 1563
1564static int
1565etnaviv_gpu_cooling_get_max_state(struct thermal_cooling_device *cdev,
1566 unsigned long *state)
1567{
1568 *state = 6;
1569
1570 return 0;
1571}
1572
1573static int
1574etnaviv_gpu_cooling_get_cur_state(struct thermal_cooling_device *cdev,
1575 unsigned long *state)
1576{
1577 struct etnaviv_gpu *gpu = cdev->devdata;
1578
1579 *state = gpu->freq_scale;
1580
1581 return 0;
1582}
1583
1584static int
1585etnaviv_gpu_cooling_set_cur_state(struct thermal_cooling_device *cdev,
1586 unsigned long state)
1587{
1588 struct etnaviv_gpu *gpu = cdev->devdata;
1589
1590 mutex_lock(&gpu->lock);
1591 gpu->freq_scale = state;
1592 if (!pm_runtime_suspended(gpu->dev))
1593 etnaviv_gpu_update_clock(gpu);
1594 mutex_unlock(&gpu->lock);
1595
1596 return 0;
1597}
1598
1599static struct thermal_cooling_device_ops cooling_ops = {
1600 .get_max_state = etnaviv_gpu_cooling_get_max_state,
1601 .get_cur_state = etnaviv_gpu_cooling_get_cur_state,
1602 .set_cur_state = etnaviv_gpu_cooling_set_cur_state,
1603};
1604
1551static int etnaviv_gpu_bind(struct device *dev, struct device *master, 1605static int etnaviv_gpu_bind(struct device *dev, struct device *master,
1552 void *data) 1606 void *data)
1553{ 1607{
@@ -1556,13 +1610,20 @@ static int etnaviv_gpu_bind(struct device *dev, struct device *master,
1556 struct etnaviv_gpu *gpu = dev_get_drvdata(dev); 1610 struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
1557 int ret; 1611 int ret;
1558 1612
1613 gpu->cooling = thermal_of_cooling_device_register(dev->of_node,
1614 (char *)dev_name(dev), gpu, &cooling_ops);
1615 if (IS_ERR(gpu->cooling))
1616 return PTR_ERR(gpu->cooling);
1617
1559#ifdef CONFIG_PM 1618#ifdef CONFIG_PM
1560 ret = pm_runtime_get_sync(gpu->dev); 1619 ret = pm_runtime_get_sync(gpu->dev);
1561#else 1620#else
1562 ret = etnaviv_gpu_clk_enable(gpu); 1621 ret = etnaviv_gpu_clk_enable(gpu);
1563#endif 1622#endif
1564 if (ret < 0) 1623 if (ret < 0) {
1624 thermal_cooling_device_unregister(gpu->cooling);
1565 return ret; 1625 return ret;
1626 }
1566 1627
1567 gpu->drm = drm; 1628 gpu->drm = drm;
1568 gpu->fence_context = dma_fence_context_alloc(1); 1629 gpu->fence_context = dma_fence_context_alloc(1);
@@ -1616,6 +1677,9 @@ static void etnaviv_gpu_unbind(struct device *dev, struct device *master,
1616 } 1677 }
1617 1678
1618 gpu->drm = NULL; 1679 gpu->drm = NULL;
1680
1681 thermal_cooling_device_unregister(gpu->cooling);
1682 gpu->cooling = NULL;
1619} 1683}
1620 1684
1621static const struct component_ops gpu_ops = { 1685static const struct component_ops gpu_ops = {
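
etnaviv_gpu_bind now registers the GPU as an OF thermal cooling device and unregisters it on the failure path and in unbind. A compressed sketch of the three-callback contract, with a hypothetical mydev in place of etnaviv_gpu (kernel context assumed, not a standalone driver):

/* get_max_state reports how many throttle steps exist,
 * set_cur_state applies one of them. */
#include <linux/thermal.h>

struct mydev {
        unsigned long level;            /* current throttle step */
};

static int my_get_max_state(struct thermal_cooling_device *cdev,
                            unsigned long *state)
{
        *state = 6;                     /* six steps, as in the patch */
        return 0;
}

static int my_get_cur_state(struct thermal_cooling_device *cdev,
                            unsigned long *state)
{
        struct mydev *d = cdev->devdata;

        *state = d->level;
        return 0;
}

static int my_set_cur_state(struct thermal_cooling_device *cdev,
                            unsigned long state)
{
        struct mydev *d = cdev->devdata;

        d->level = state;               /* a real driver reclocks here */
        return 0;
}

static const struct thermal_cooling_device_ops my_ops = {
        .get_max_state = my_get_max_state,
        .get_cur_state = my_get_cur_state,
        .set_cur_state = my_set_cur_state,
};

A driver would then pair thermal_of_cooling_device_register(dev->of_node, name, d, &my_ops) with thermal_cooling_device_unregister() on teardown, mirroring the bind/unbind hunks.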
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
index 1c0606ea7d5e..9227a9740447 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
@@ -97,6 +97,7 @@ struct etnaviv_cmdbuf;
97 97
98struct etnaviv_gpu { 98struct etnaviv_gpu {
99 struct drm_device *drm; 99 struct drm_device *drm;
100 struct thermal_cooling_device *cooling;
100 struct device *dev; 101 struct device *dev;
101 struct mutex lock; 102 struct mutex lock;
102 struct etnaviv_chip_identity identity; 103 struct etnaviv_chip_identity identity;
@@ -150,6 +151,7 @@ struct etnaviv_gpu {
150 u32 hangcheck_fence; 151 u32 hangcheck_fence;
151 u32 hangcheck_dma_addr; 152 u32 hangcheck_dma_addr;
152 struct work_struct recover_work; 153 struct work_struct recover_work;
154 unsigned int freq_scale;
153}; 155};
154 156
155static inline void gpu_write(struct etnaviv_gpu *gpu, u32 reg, u32 data) 157static inline void gpu_write(struct etnaviv_gpu *gpu, u32 reg, u32 data)
@@ -181,7 +183,7 @@ int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m);
181#endif 183#endif
182 184
183int etnaviv_gpu_fence_sync_obj(struct etnaviv_gem_object *etnaviv_obj, 185int etnaviv_gpu_fence_sync_obj(struct etnaviv_gem_object *etnaviv_obj,
184 unsigned int context, bool exclusive); 186 unsigned int context, bool exclusive, bool implicit);
185 187
186void etnaviv_gpu_retire(struct etnaviv_gpu *gpu); 188void etnaviv_gpu_retire(struct etnaviv_gpu *gpu);
187int etnaviv_gpu_wait_fence_interruptible(struct etnaviv_gpu *gpu, 189int etnaviv_gpu_wait_fence_interruptible(struct etnaviv_gpu *gpu,
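
The header change accompanies the submit hunk above, where submit->fence becomes a dma_fence pointer taken with dma_fence_get() instead of a bare seqno. A short kernel-style sketch of the get/put discipline that implies, with a hypothetical struct job standing in for the submit object:

/* Every stored copy of a fence pointer owns its own reference.
 * Kernel context assumed; both helpers are NULL-safe. */
#include <linux/dma-fence.h>

struct job {
        struct dma_fence *fence;
};

static void job_attach(struct job *job, struct dma_fence *f)
{
        job->fence = dma_fence_get(f);  /* ref for the stored copy */
}

static void job_retire(struct job *job)
{
        dma_fence_put(job->fence);      /* drop that ref exactly once */
        job->fence = NULL;
}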
diff --git a/drivers/gpu/drm/exynos/exynos_dp.c b/drivers/gpu/drm/exynos/exynos_dp.c
index b445b50a5dc4..385537b726a6 100644
--- a/drivers/gpu/drm/exynos/exynos_dp.c
+++ b/drivers/gpu/drm/exynos/exynos_dp.c
@@ -23,6 +23,7 @@
23#include <drm/drmP.h> 23#include <drm/drmP.h>
24#include <drm/drm_crtc.h> 24#include <drm/drm_crtc.h>
25#include <drm/drm_crtc_helper.h> 25#include <drm/drm_crtc_helper.h>
26#include <drm/drm_of.h>
26#include <drm/drm_panel.h> 27#include <drm/drm_panel.h>
27 28
28#include <drm/bridge/analogix_dp.h> 29#include <drm/bridge/analogix_dp.h>
@@ -211,8 +212,11 @@ static const struct component_ops exynos_dp_ops = {
211static int exynos_dp_probe(struct platform_device *pdev) 212static int exynos_dp_probe(struct platform_device *pdev)
212{ 213{
213 struct device *dev = &pdev->dev; 214 struct device *dev = &pdev->dev;
214 struct device_node *np = NULL, *endpoint = NULL; 215 struct device_node *np;
215 struct exynos_dp_device *dp; 216 struct exynos_dp_device *dp;
217 struct drm_panel *panel;
218 struct drm_bridge *bridge;
219 int ret;
216 220
217 dp = devm_kzalloc(&pdev->dev, sizeof(struct exynos_dp_device), 221 dp = devm_kzalloc(&pdev->dev, sizeof(struct exynos_dp_device),
218 GFP_KERNEL); 222 GFP_KERNEL);
@@ -236,28 +240,13 @@ static int exynos_dp_probe(struct platform_device *pdev)
236 goto out; 240 goto out;
237 } 241 }
238 242
239 endpoint = of_graph_get_next_endpoint(dev->of_node, NULL); 243 ret = drm_of_find_panel_or_bridge(dev->of_node, 0, 0, &panel, &bridge);
240 if (endpoint) { 244 if (ret)
241 np = of_graph_get_remote_port_parent(endpoint); 245 return ret;
242 if (np) { 246
243 /* The remote port can be either a panel or a bridge */ 247 /* The remote port can be either a panel or a bridge */
244 dp->plat_data.panel = of_drm_find_panel(np); 248 dp->plat_data.panel = panel;
245 if (!dp->plat_data.panel) { 249 dp->ptn_bridge = bridge;
246 dp->ptn_bridge = of_drm_find_bridge(np);
247 if (!dp->ptn_bridge) {
248 of_node_put(np);
249 return -EPROBE_DEFER;
250 }
251 }
252 of_node_put(np);
253 } else {
254 DRM_ERROR("no remote endpoint device node found.\n");
255 return -EINVAL;
256 }
257 } else {
258 DRM_ERROR("no port endpoint subnode found.\n");
259 return -EINVAL;
260 }
261 250
262out: 251out:
263 return component_add(&pdev->dev, &exynos_dp_ops); 252 return component_add(&pdev->dev, &exynos_dp_ops);
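
This conversion, like the fsl-dcu and kirin ones below, replaces open-coded endpoint walking with drm_of_find_panel_or_bridge(), which resolves the OF-graph remote into either a drm_panel or a drm_bridge and defers probing until the remote driver has registered. A hedged sketch of a probe using it (my_probe and the surrounding driver are hypothetical, kernel context assumed):

#include <linux/platform_device.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include <drm/drm_bridge.h>

static int my_probe(struct platform_device *pdev)
{
        struct drm_panel *panel = NULL;
        struct drm_bridge *bridge = NULL;
        int ret;

        /* port 0, endpoint 0, matching the call sites in this series */
        ret = drm_of_find_panel_or_bridge(pdev->dev.of_node, 0, 0,
                                          &panel, &bridge);
        if (ret)
                return ret;     /* -ENODEV, or -EPROBE_DEFER to retry */

        /* on success exactly one of panel/bridge is non-NULL */
        return 0;
}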
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dpi.c b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
index 3aab71a485ba..63abcd280fa0 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dpi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
@@ -163,27 +163,13 @@ enum {
163 FIMD_PORT_WRB, 163 FIMD_PORT_WRB,
164}; 164};
165 165
166static struct device_node *exynos_dpi_of_find_panel_node(struct device *dev)
167{
168 struct device_node *np, *ep;
169
170 ep = of_graph_get_endpoint_by_regs(dev->of_node, FIMD_PORT_RGB, 0);
171 if (!ep)
172 return NULL;
173
174 np = of_graph_get_remote_port_parent(ep);
175 of_node_put(ep);
176
177 return np;
178}
179
180static int exynos_dpi_parse_dt(struct exynos_dpi *ctx) 166static int exynos_dpi_parse_dt(struct exynos_dpi *ctx)
181{ 167{
182 struct device *dev = ctx->dev; 168 struct device *dev = ctx->dev;
183 struct device_node *dn = dev->of_node; 169 struct device_node *dn = dev->of_node;
184 struct device_node *np; 170 struct device_node *np;
185 171
186 ctx->panel_node = exynos_dpi_of_find_panel_node(dev); 172 ctx->panel_node = of_graph_get_remote_node(dn, FIMD_PORT_RGB, 0);
187 173
188 np = of_get_child_by_name(dn, "display-timings"); 174 np = of_get_child_by_name(dn, "display-timings");
189 if (np) { 175 if (np) {
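
The helper removed here is exactly what of_graph_get_remote_node() now does in one call: look up the endpoint at (port, endpoint), take its remote port parent, and check that the remote is available, returning a counted node reference. A small sketch of the calling convention (remote_available is a hypothetical example, kernel context assumed):

#include <linux/of.h>
#include <linux/of_graph.h>

static bool remote_available(struct device_node *np)
{
        /* endpoint lookup, remote-parent lookup and availability
         * check folded into one call; returns a counted reference */
        struct device_node *remote = of_graph_get_remote_node(np, 0, 0);
        bool ok = remote != NULL;

        of_node_put(remote);            /* of_node_put(NULL) is a no-op */
        return ok;
}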
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index 6d4da2f0932d..fc4fda738906 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -1659,17 +1659,10 @@ static int exynos_dsi_parse_dt(struct exynos_dsi *dsi)
1659 1659
1660 of_node_put(ep); 1660 of_node_put(ep);
1661 1661
1662 ep = of_graph_get_next_endpoint(node, NULL); 1662 dsi->bridge_node = of_graph_get_remote_node(node, DSI_PORT_OUT, 0);
1663 if (!ep) { 1663 if (!dsi->bridge_node)
1664 ret = -EINVAL; 1664 return -EINVAL;
1665 goto end;
1666 }
1667 1665
1668 dsi->bridge_node = of_graph_get_remote_port_parent(ep);
1669 if (!dsi->bridge_node) {
1670 ret = -EINVAL;
1671 goto end;
1672 }
1673end: 1666end:
1674 of_node_put(ep); 1667 of_node_put(ep);
1675 1668
diff --git a/drivers/gpu/drm/exynos/exynos_drm_mic.c b/drivers/gpu/drm/exynos/exynos_drm_mic.c
index 2ef43d403eaa..e45720543a45 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_mic.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_mic.c
@@ -229,29 +229,6 @@ static void mic_set_reg_on(struct exynos_mic *mic, bool enable)
229 writel(reg, mic->reg + MIC_OP); 229 writel(reg, mic->reg + MIC_OP);
230} 230}
231 231
232static struct device_node *get_remote_node(struct device_node *from, int reg)
233{
234 struct device_node *endpoint = NULL, *remote_node = NULL;
235
236 endpoint = of_graph_get_endpoint_by_regs(from, reg, -1);
237 if (!endpoint) {
238 DRM_ERROR("mic: Failed to find remote port from %s",
239 from->full_name);
240 goto exit;
241 }
242
243 remote_node = of_graph_get_remote_port_parent(endpoint);
244 if (!remote_node) {
245 DRM_ERROR("mic: Failed to find remote port parent from %s",
246 from->full_name);
247 goto exit;
248 }
249
250exit:
251 of_node_put(endpoint);
252 return remote_node;
253}
254
255static int parse_dt(struct exynos_mic *mic) 232static int parse_dt(struct exynos_mic *mic)
256{ 233{
257 int ret = 0, i, j; 234 int ret = 0, i, j;
@@ -263,7 +240,7 @@ static int parse_dt(struct exynos_mic *mic)
263 * The first node must be for decon and the second one must be for dsi. 240 * The first node must be for decon and the second one must be for dsi.
264 */ 241 */
265 for (i = 0, j = 0; i < NUM_ENDPOINTS; i++) { 242 for (i = 0, j = 0; i < NUM_ENDPOINTS; i++) {
266 remote_node = get_remote_node(mic->dev->of_node, i); 243 remote_node = of_graph_get_remote_node(mic->dev->of_node, i, 0);
267 if (!remote_node) { 244 if (!remote_node) {
268 ret = -EPIPE; 245 ret = -EPIPE;
269 goto exit; 246 goto exit;
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
index c3651456c963..dcbf3c06e1d8 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
@@ -15,6 +15,7 @@
15#include <drm/drmP.h> 15#include <drm/drmP.h>
16#include <drm/drm_atomic_helper.h> 16#include <drm/drm_atomic_helper.h>
17#include <drm/drm_crtc_helper.h> 17#include <drm/drm_crtc_helper.h>
18#include <drm/drm_of.h>
18#include <drm/drm_panel.h> 19#include <drm/drm_panel.h>
19 20
20#include "fsl_dcu_drm_drv.h" 21#include "fsl_dcu_drm_drv.h"
@@ -141,32 +142,11 @@ err_cleanup:
141 return ret; 142 return ret;
142} 143}
143 144
144static int fsl_dcu_attach_endpoint(struct fsl_dcu_drm_device *fsl_dev,
145 const struct of_endpoint *ep)
146{
147 struct drm_bridge *bridge;
148 struct device_node *np;
149
150 np = of_graph_get_remote_port_parent(ep->local_node);
151
152 fsl_dev->connector.panel = of_drm_find_panel(np);
153 if (fsl_dev->connector.panel) {
154 of_node_put(np);
155 return fsl_dcu_attach_panel(fsl_dev, fsl_dev->connector.panel);
156 }
157
158 bridge = of_drm_find_bridge(np);
159 of_node_put(np);
160 if (!bridge)
161 return -ENODEV;
162
163 return drm_bridge_attach(&fsl_dev->encoder, bridge, NULL);
164}
165
166int fsl_dcu_create_outputs(struct fsl_dcu_drm_device *fsl_dev) 145int fsl_dcu_create_outputs(struct fsl_dcu_drm_device *fsl_dev)
167{ 146{
168 struct of_endpoint ep; 147 struct device_node *panel_node;
169 struct device_node *ep_node, *panel_node; 148 struct drm_panel *panel;
149 struct drm_bridge *bridge;
170 int ret; 150 int ret;
171 151
172 /* This is for backward compatibility */ 152 /* This is for backward compatibility */
@@ -179,14 +159,14 @@ int fsl_dcu_create_outputs(struct fsl_dcu_drm_device *fsl_dev)
179 return fsl_dcu_attach_panel(fsl_dev, fsl_dev->connector.panel); 159 return fsl_dcu_attach_panel(fsl_dev, fsl_dev->connector.panel);
180 } 160 }
181 161
182 ep_node = of_graph_get_next_endpoint(fsl_dev->np, NULL); 162 ret = drm_of_find_panel_or_bridge(fsl_dev->np, 0, 0, &panel, &bridge);
183 if (!ep_node)
184 return -ENODEV;
185
186 ret = of_graph_parse_endpoint(ep_node, &ep);
187 of_node_put(ep_node);
188 if (ret) 163 if (ret)
189 return -ENODEV; 164 return ret;
165
166 if (panel) {
167 fsl_dev->connector.panel = panel;
168 return fsl_dcu_attach_panel(fsl_dev, panel);
169 }
190 170
191 return fsl_dcu_attach_endpoint(fsl_dev, &ep); 171 return drm_bridge_attach(&fsl_dev->encoder, bridge, NULL);
192} 172}
diff --git a/drivers/gpu/drm/gma500/gma_display.c b/drivers/gpu/drm/gma500/gma_display.c
index 93ff46535c04..e7fd356acf2e 100644
--- a/drivers/gpu/drm/gma500/gma_display.c
+++ b/drivers/gpu/drm/gma500/gma_display.c
@@ -177,7 +177,8 @@ void gma_crtc_load_lut(struct drm_crtc *crtc)
177} 177}
178 178
179int gma_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, u16 *blue, 179int gma_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, u16 *blue,
180 u32 size) 180 u32 size,
181 struct drm_modeset_acquire_ctx *ctx)
181{ 182{
182 struct gma_crtc *gma_crtc = to_gma_crtc(crtc); 183 struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
183 int i; 184 int i;
diff --git a/drivers/gpu/drm/gma500/gma_display.h b/drivers/gpu/drm/gma500/gma_display.h
index 166e608923db..239c374b6169 100644
--- a/drivers/gpu/drm/gma500/gma_display.h
+++ b/drivers/gpu/drm/gma500/gma_display.h
@@ -73,7 +73,8 @@ extern int gma_crtc_cursor_set(struct drm_crtc *crtc,
73extern int gma_crtc_cursor_move(struct drm_crtc *crtc, int x, int y); 73extern int gma_crtc_cursor_move(struct drm_crtc *crtc, int x, int y);
74extern void gma_crtc_load_lut(struct drm_crtc *crtc); 74extern void gma_crtc_load_lut(struct drm_crtc *crtc);
75extern int gma_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, 75extern int gma_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
76 u16 *blue, u32 size); 76 u16 *blue, u32 size,
77 struct drm_modeset_acquire_ctx *ctx);
77extern void gma_crtc_dpms(struct drm_crtc *crtc, int mode); 78extern void gma_crtc_dpms(struct drm_crtc *crtc, int mode);
78extern void gma_crtc_prepare(struct drm_crtc *crtc); 79extern void gma_crtc_prepare(struct drm_crtc *crtc);
79extern void gma_crtc_commit(struct drm_crtc *crtc); 80extern void gma_crtc_commit(struct drm_crtc *crtc);
diff --git a/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c b/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
index 1737e98bc10a..5abc69c9630f 100644
--- a/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
+++ b/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
@@ -17,7 +17,6 @@
17 17
18#include <linux/clk.h> 18#include <linux/clk.h>
19#include <linux/component.h> 19#include <linux/component.h>
20#include <linux/of_graph.h>
21 20
22#include <drm/drm_of.h> 21#include <drm/drm_of.h>
23#include <drm/drm_crtc_helper.h> 22#include <drm/drm_crtc_helper.h>
@@ -754,34 +753,16 @@ static int dsi_parse_dt(struct platform_device *pdev, struct dw_dsi *dsi)
754{ 753{
755 struct dsi_hw_ctx *ctx = dsi->ctx; 754 struct dsi_hw_ctx *ctx = dsi->ctx;
756 struct device_node *np = pdev->dev.of_node; 755 struct device_node *np = pdev->dev.of_node;
757 struct device_node *endpoint, *bridge_node;
758 struct drm_bridge *bridge;
759 struct resource *res; 756 struct resource *res;
757 int ret;
760 758
761 /* 759 /*
762 * Get the endpoint node. In our case, dsi has one output port1 760 * Get the endpoint node. In our case, dsi has one output port1
763 * to which the external HDMI bridge is connected. 761 * to which the external HDMI bridge is connected.
764 */ 762 */
765 endpoint = of_graph_get_endpoint_by_regs(np, 1, -1); 763 ret = drm_of_find_panel_or_bridge(np, 0, 0, NULL, &dsi->bridge);
766 if (!endpoint) { 764 if (ret)
767 DRM_ERROR("no valid endpoint node\n"); 765 return ret;
768 return -ENODEV;
769 }
770 of_node_put(endpoint);
771
772 bridge_node = of_graph_get_remote_port_parent(endpoint);
773 if (!bridge_node) {
774 DRM_ERROR("no valid bridge node\n");
775 return -ENODEV;
776 }
777 of_node_put(bridge_node);
778
779 bridge = of_drm_find_bridge(bridge_node);
780 if (!bridge) {
781 DRM_INFO("wait for external HDMI bridge driver.\n");
782 return -EPROBE_DEFER;
783 }
784 dsi->bridge = bridge;
785 766
786 ctx->pclk = devm_clk_get(&pdev->dev, "pclk"); 767 ctx->pclk = devm_clk_get(&pdev->dev, "pclk");
787 if (IS_ERR(ctx->pclk)) { 768 if (IS_ERR(ctx->pclk)) {
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
index df4f50713e54..9c903672f582 100644
--- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
+++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
@@ -230,34 +230,6 @@ static const struct component_master_ops kirin_drm_ops = {
230 .unbind = kirin_drm_unbind, 230 .unbind = kirin_drm_unbind,
231}; 231};
232 232
233static struct device_node *kirin_get_remote_node(struct device_node *np)
234{
235 struct device_node *endpoint, *remote;
236
237 /* get the first endpoint, in our case only one remote node
238 * is connected to display controller.
239 */
240 endpoint = of_graph_get_next_endpoint(np, NULL);
241 if (!endpoint) {
242 DRM_ERROR("no valid endpoint node\n");
243 return ERR_PTR(-ENODEV);
244 }
245
246 remote = of_graph_get_remote_port_parent(endpoint);
247 of_node_put(endpoint);
248 if (!remote) {
249 DRM_ERROR("no valid remote node\n");
250 return ERR_PTR(-ENODEV);
251 }
252
253 if (!of_device_is_available(remote)) {
254 DRM_ERROR("not available for remote node\n");
255 return ERR_PTR(-ENODEV);
256 }
257
258 return remote;
259}
260
261static int kirin_drm_platform_probe(struct platform_device *pdev) 233static int kirin_drm_platform_probe(struct platform_device *pdev)
262{ 234{
263 struct device *dev = &pdev->dev; 235 struct device *dev = &pdev->dev;
@@ -271,7 +243,7 @@ static int kirin_drm_platform_probe(struct platform_device *pdev)
271 return -EINVAL; 243 return -EINVAL;
272 } 244 }
273 245
274 remote = kirin_get_remote_node(np); 246 remote = of_graph_get_remote_node(np, 0, 0);
275 if (IS_ERR(remote)) 247 if (IS_ERR(remote))
276 return PTR_ERR(remote); 248 return PTR_ERR(remote);
277 249
diff --git a/drivers/gpu/drm/i915/gvt/cfg_space.c b/drivers/gpu/drm/i915/gvt/cfg_space.c
index b7d7721e72fa..40af17ec6312 100644
--- a/drivers/gpu/drm/i915/gvt/cfg_space.c
+++ b/drivers/gpu/drm/i915/gvt/cfg_space.c
@@ -285,9 +285,6 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
285{ 285{
286 int ret; 286 int ret;
287 287
288 if (vgpu->failsafe)
289 return 0;
290
291 if (WARN_ON(bytes > 4)) 288 if (WARN_ON(bytes > 4))
292 return -EINVAL; 289 return -EINVAL;
293 290
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 6f972afbdbc3..41b2c3aaa04a 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -616,9 +616,6 @@ static inline u32 get_opcode(u32 cmd, int ring_id)
616{ 616{
617 struct decode_info *d_info; 617 struct decode_info *d_info;
618 618
619 if (ring_id >= I915_NUM_ENGINES)
620 return INVALID_OP;
621
622 d_info = ring_decode_info[ring_id][CMD_TYPE(cmd)]; 619 d_info = ring_decode_info[ring_id][CMD_TYPE(cmd)];
623 if (d_info == NULL) 620 if (d_info == NULL)
624 return INVALID_OP; 621 return INVALID_OP;
@@ -661,9 +658,6 @@ static inline void print_opcode(u32 cmd, int ring_id)
661 struct decode_info *d_info; 658 struct decode_info *d_info;
662 int i; 659 int i;
663 660
664 if (ring_id >= I915_NUM_ENGINES)
665 return;
666
667 d_info = ring_decode_info[ring_id][CMD_TYPE(cmd)]; 661 d_info = ring_decode_info[ring_id][CMD_TYPE(cmd)];
668 if (d_info == NULL) 662 if (d_info == NULL)
669 return; 663 return;
@@ -1215,7 +1209,7 @@ static int gen8_check_mi_display_flip(struct parser_exec_state *s,
1215 if (!info->async_flip) 1209 if (!info->async_flip)
1216 return 0; 1210 return 0;
1217 1211
1218 if (IS_SKYLAKE(dev_priv)) { 1212 if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
1219 stride = vgpu_vreg(s->vgpu, info->stride_reg) & GENMASK(9, 0); 1213 stride = vgpu_vreg(s->vgpu, info->stride_reg) & GENMASK(9, 0);
1220 tile = (vgpu_vreg(s->vgpu, info->ctrl_reg) & 1214 tile = (vgpu_vreg(s->vgpu, info->ctrl_reg) &
1221 GENMASK(12, 10)) >> 10; 1215 GENMASK(12, 10)) >> 10;
@@ -1243,7 +1237,7 @@ static int gen8_update_plane_mmio_from_mi_display_flip(
1243 1237
1244 set_mask_bits(&vgpu_vreg(vgpu, info->surf_reg), GENMASK(31, 12), 1238 set_mask_bits(&vgpu_vreg(vgpu, info->surf_reg), GENMASK(31, 12),
1245 info->surf_val << 12); 1239 info->surf_val << 12);
1246 if (IS_SKYLAKE(dev_priv)) { 1240 if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
1247 set_mask_bits(&vgpu_vreg(vgpu, info->stride_reg), GENMASK(9, 0), 1241 set_mask_bits(&vgpu_vreg(vgpu, info->stride_reg), GENMASK(9, 0),
1248 info->stride_val); 1242 info->stride_val);
1249 set_mask_bits(&vgpu_vreg(vgpu, info->ctrl_reg), GENMASK(12, 10), 1243 set_mask_bits(&vgpu_vreg(vgpu, info->ctrl_reg), GENMASK(12, 10),
@@ -1267,7 +1261,7 @@ static int decode_mi_display_flip(struct parser_exec_state *s,
1267 1261
1268 if (IS_BROADWELL(dev_priv)) 1262 if (IS_BROADWELL(dev_priv))
1269 return gen8_decode_mi_display_flip(s, info); 1263 return gen8_decode_mi_display_flip(s, info);
1270 if (IS_SKYLAKE(dev_priv)) 1264 if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
1271 return skl_decode_mi_display_flip(s, info); 1265 return skl_decode_mi_display_flip(s, info);
1272 1266
1273 return -ENODEV; 1267 return -ENODEV;
@@ -1278,7 +1272,9 @@ static int check_mi_display_flip(struct parser_exec_state *s,
1278{ 1272{
1279 struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv; 1273 struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
1280 1274
1281 if (IS_BROADWELL(dev_priv) || IS_SKYLAKE(dev_priv)) 1275 if (IS_BROADWELL(dev_priv)
1276 || IS_SKYLAKE(dev_priv)
1277 || IS_KABYLAKE(dev_priv))
1282 return gen8_check_mi_display_flip(s, info); 1278 return gen8_check_mi_display_flip(s, info);
1283 return -ENODEV; 1279 return -ENODEV;
1284} 1280}
@@ -1289,7 +1285,9 @@ static int update_plane_mmio_from_mi_display_flip(
1289{ 1285{
1290 struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv; 1286 struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
1291 1287
1292 if (IS_BROADWELL(dev_priv) || IS_SKYLAKE(dev_priv)) 1288 if (IS_BROADWELL(dev_priv)
1289 || IS_SKYLAKE(dev_priv)
1290 || IS_KABYLAKE(dev_priv))
1293 return gen8_update_plane_mmio_from_mi_display_flip(s, info); 1291 return gen8_update_plane_mmio_from_mi_display_flip(s, info);
1294 return -ENODEV; 1292 return -ENODEV;
1295} 1293}
@@ -1569,7 +1567,8 @@ static int batch_buffer_needs_scan(struct parser_exec_state *s)
1569{ 1567{
1570 struct intel_gvt *gvt = s->vgpu->gvt; 1568 struct intel_gvt *gvt = s->vgpu->gvt;
1571 1569
1572 if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)) { 1570 if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)
1571 || IS_KABYLAKE(gvt->dev_priv)) {
1573 /* BDW decides privilege based on address space */ 1572 /* BDW decides privilege based on address space */
1574 if (cmd_val(s, 0) & (1 << 8)) 1573 if (cmd_val(s, 0) & (1 << 8))
1575 return 0; 1574 return 0;
@@ -2478,7 +2477,7 @@ static int cmd_parser_exec(struct parser_exec_state *s)
2478 2477
2479 t1 = get_cycles(); 2478 t1 = get_cycles();
2480 2479
2481 memcpy(&s_before_advance_custom, s, sizeof(struct parser_exec_state)); 2480 s_before_advance_custom = *s;
2482 2481
2483 if (info->handler) { 2482 if (info->handler) {
2484 ret = info->handler(s); 2483 ret = info->handler(s);
@@ -2604,6 +2603,9 @@ static int scan_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
2604 unsigned long gma_head, gma_tail, gma_bottom, ring_size, ring_tail; 2603 unsigned long gma_head, gma_tail, gma_bottom, ring_size, ring_tail;
2605 struct parser_exec_state s; 2604 struct parser_exec_state s;
2606 int ret = 0; 2605 int ret = 0;
2606 struct intel_vgpu_workload *workload = container_of(wa_ctx,
2607 struct intel_vgpu_workload,
2608 wa_ctx);
2607 2609
2608 /* ring base is page aligned */ 2610 /* ring base is page aligned */
2609 if (WARN_ON(!IS_ALIGNED(wa_ctx->indirect_ctx.guest_gma, GTT_PAGE_SIZE))) 2611 if (WARN_ON(!IS_ALIGNED(wa_ctx->indirect_ctx.guest_gma, GTT_PAGE_SIZE)))
@@ -2618,14 +2620,14 @@ static int scan_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
2618 2620
2619 s.buf_type = RING_BUFFER_INSTRUCTION; 2621 s.buf_type = RING_BUFFER_INSTRUCTION;
2620 s.buf_addr_type = GTT_BUFFER; 2622 s.buf_addr_type = GTT_BUFFER;
2621 s.vgpu = wa_ctx->workload->vgpu; 2623 s.vgpu = workload->vgpu;
2622 s.ring_id = wa_ctx->workload->ring_id; 2624 s.ring_id = workload->ring_id;
2623 s.ring_start = wa_ctx->indirect_ctx.guest_gma; 2625 s.ring_start = wa_ctx->indirect_ctx.guest_gma;
2624 s.ring_size = ring_size; 2626 s.ring_size = ring_size;
2625 s.ring_head = gma_head; 2627 s.ring_head = gma_head;
2626 s.ring_tail = gma_tail; 2628 s.ring_tail = gma_tail;
2627 s.rb_va = wa_ctx->indirect_ctx.shadow_va; 2629 s.rb_va = wa_ctx->indirect_ctx.shadow_va;
2628 s.workload = wa_ctx->workload; 2630 s.workload = workload;
2629 2631
2630 ret = ip_gma_set(&s, gma_head); 2632 ret = ip_gma_set(&s, gma_head);
2631 if (ret) 2633 if (ret)
@@ -2708,12 +2710,15 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
2708{ 2710{
2709 int ctx_size = wa_ctx->indirect_ctx.size; 2711 int ctx_size = wa_ctx->indirect_ctx.size;
2710 unsigned long guest_gma = wa_ctx->indirect_ctx.guest_gma; 2712 unsigned long guest_gma = wa_ctx->indirect_ctx.guest_gma;
2711 struct intel_vgpu *vgpu = wa_ctx->workload->vgpu; 2713 struct intel_vgpu_workload *workload = container_of(wa_ctx,
2714 struct intel_vgpu_workload,
2715 wa_ctx);
2716 struct intel_vgpu *vgpu = workload->vgpu;
2712 struct drm_i915_gem_object *obj; 2717 struct drm_i915_gem_object *obj;
2713 int ret = 0; 2718 int ret = 0;
2714 void *map; 2719 void *map;
2715 2720
2716 obj = i915_gem_object_create(wa_ctx->workload->vgpu->gvt->dev_priv, 2721 obj = i915_gem_object_create(workload->vgpu->gvt->dev_priv,
2717 roundup(ctx_size + CACHELINE_BYTES, 2722 roundup(ctx_size + CACHELINE_BYTES,
2718 PAGE_SIZE)); 2723 PAGE_SIZE));
2719 if (IS_ERR(obj)) 2724 if (IS_ERR(obj))
@@ -2733,8 +2738,8 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
2733 goto unmap_src; 2738 goto unmap_src;
2734 } 2739 }
2735 2740
2736 ret = copy_gma_to_hva(wa_ctx->workload->vgpu, 2741 ret = copy_gma_to_hva(workload->vgpu,
2737 wa_ctx->workload->vgpu->gtt.ggtt_mm, 2742 workload->vgpu->gtt.ggtt_mm,
2738 guest_gma, guest_gma + ctx_size, 2743 guest_gma, guest_gma + ctx_size,
2739 map); 2744 map);
2740 if (ret < 0) { 2745 if (ret < 0) {
@@ -2772,7 +2777,10 @@ static int combine_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
2772int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx) 2777int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
2773{ 2778{
2774 int ret; 2779 int ret;
2775 struct intel_vgpu *vgpu = wa_ctx->workload->vgpu; 2780 struct intel_vgpu_workload *workload = container_of(wa_ctx,
2781 struct intel_vgpu_workload,
2782 wa_ctx);
2783 struct intel_vgpu *vgpu = workload->vgpu;
2776 2784
2777 if (wa_ctx->indirect_ctx.size == 0) 2785 if (wa_ctx->indirect_ctx.size == 0)
2778 return 0; 2786 return 0;
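
The cmd_parser hunks drop the wa_ctx->workload back-pointer (its assignment is removed from submit_context in execlist.c below) and instead recover the enclosing workload with container_of(), since intel_shadow_wa_ctx is embedded in intel_vgpu_workload; the same file also swaps a memcpy of parser state for plain struct assignment, which gets type checking for free. A self-contained userspace demo of the container_of recovery, with invented struct names and a simplified macro (the kernel's version adds a type check):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct wa_ctx {
        int size;
};

struct workload {
        int ring_id;
        struct wa_ctx wa_ctx;           /* embedded, not pointed-to */
};

static int ring_of(struct wa_ctx *ctx)
{
        /* recover the enclosing workload from the embedded member */
        struct workload *w = container_of(ctx, struct workload, wa_ctx);

        return w->ring_id;
}

int main(void)
{
        struct workload w = { .ring_id = 2 };

        printf("%d\n", ring_of(&w.wa_ctx));     /* prints 2 */
        return 0;
}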
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
index 5419ae6ec633..e0261fcc5b50 100644
--- a/drivers/gpu/drm/i915/gvt/display.c
+++ b/drivers/gpu/drm/i915/gvt/display.c
@@ -161,8 +161,9 @@ static unsigned char virtual_dp_monitor_edid[GVT_EDID_NUM][EDID_SIZE] = {
161 161
162#define DPCD_HEADER_SIZE 0xb 162#define DPCD_HEADER_SIZE 0xb
163 163
164/* let the virtual display support DP1.2 */
164static u8 dpcd_fix_data[DPCD_HEADER_SIZE] = { 165static u8 dpcd_fix_data[DPCD_HEADER_SIZE] = {
165 0x11, 0x0a, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 166 0x12, 0x014, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
166}; 167};
167 168
168static void emulate_monitor_status_change(struct intel_vgpu *vgpu) 169static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
@@ -172,26 +173,64 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
172 SDE_PORTC_HOTPLUG_CPT | 173 SDE_PORTC_HOTPLUG_CPT |
173 SDE_PORTD_HOTPLUG_CPT); 174 SDE_PORTD_HOTPLUG_CPT);
174 175
175 if (IS_SKYLAKE(dev_priv)) 176 if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
176 vgpu_vreg(vgpu, SDEISR) &= ~(SDE_PORTA_HOTPLUG_SPT | 177 vgpu_vreg(vgpu, SDEISR) &= ~(SDE_PORTA_HOTPLUG_SPT |
177 SDE_PORTE_HOTPLUG_SPT); 178 SDE_PORTE_HOTPLUG_SPT);
179 vgpu_vreg(vgpu, SKL_FUSE_STATUS) |=
180 SKL_FUSE_DOWNLOAD_STATUS |
181 SKL_FUSE_PG0_DIST_STATUS |
182 SKL_FUSE_PG1_DIST_STATUS |
183 SKL_FUSE_PG2_DIST_STATUS;
184 vgpu_vreg(vgpu, LCPLL1_CTL) |=
185 LCPLL_PLL_ENABLE |
186 LCPLL_PLL_LOCK;
187 vgpu_vreg(vgpu, LCPLL2_CTL) |= LCPLL_PLL_ENABLE;
188
189 }
178 190
179 if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) { 191 if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) {
180 vgpu_vreg(vgpu, SDEISR) |= SDE_PORTB_HOTPLUG_CPT;
181 vgpu_vreg(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIB_DETECTED; 192 vgpu_vreg(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIB_DETECTED;
193 vgpu_vreg(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &=
194 ~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
195 TRANS_DDI_PORT_MASK);
196 vgpu_vreg(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |=
197 (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
198 (PORT_B << TRANS_DDI_PORT_SHIFT) |
199 TRANS_DDI_FUNC_ENABLE);
200 vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_B)) |= DDI_BUF_CTL_ENABLE;
201 vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_B)) &= ~DDI_BUF_IS_IDLE;
202 vgpu_vreg(vgpu, SDEISR) |= SDE_PORTB_HOTPLUG_CPT;
182 } 203 }
183 204
184 if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C)) { 205 if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C)) {
185 vgpu_vreg(vgpu, SDEISR) |= SDE_PORTC_HOTPLUG_CPT; 206 vgpu_vreg(vgpu, SDEISR) |= SDE_PORTC_HOTPLUG_CPT;
207 vgpu_vreg(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &=
208 ~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
209 TRANS_DDI_PORT_MASK);
210 vgpu_vreg(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |=
211 (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
212 (PORT_C << TRANS_DDI_PORT_SHIFT) |
213 TRANS_DDI_FUNC_ENABLE);
214 vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_C)) |= DDI_BUF_CTL_ENABLE;
215 vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_C)) &= ~DDI_BUF_IS_IDLE;
186 vgpu_vreg(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIC_DETECTED; 216 vgpu_vreg(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIC_DETECTED;
187 } 217 }
188 218
189 if (intel_vgpu_has_monitor_on_port(vgpu, PORT_D)) { 219 if (intel_vgpu_has_monitor_on_port(vgpu, PORT_D)) {
190 vgpu_vreg(vgpu, SDEISR) |= SDE_PORTD_HOTPLUG_CPT; 220 vgpu_vreg(vgpu, SDEISR) |= SDE_PORTD_HOTPLUG_CPT;
221 vgpu_vreg(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &=
222 ~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
223 TRANS_DDI_PORT_MASK);
224 vgpu_vreg(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |=
225 (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
226 (PORT_D << TRANS_DDI_PORT_SHIFT) |
227 TRANS_DDI_FUNC_ENABLE);
228 vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_D)) |= DDI_BUF_CTL_ENABLE;
229 vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_D)) &= ~DDI_BUF_IS_IDLE;
191 vgpu_vreg(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDID_DETECTED; 230 vgpu_vreg(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDID_DETECTED;
192 } 231 }
193 232
194 if (IS_SKYLAKE(dev_priv) && 233 if ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) &&
195 intel_vgpu_has_monitor_on_port(vgpu, PORT_E)) { 234 intel_vgpu_has_monitor_on_port(vgpu, PORT_E)) {
196 vgpu_vreg(vgpu, SDEISR) |= SDE_PORTE_HOTPLUG_SPT; 235 vgpu_vreg(vgpu, SDEISR) |= SDE_PORTE_HOTPLUG_SPT;
197 } 236 }
@@ -353,7 +392,7 @@ void intel_vgpu_clean_display(struct intel_vgpu *vgpu)
353{ 392{
354 struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; 393 struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
355 394
356 if (IS_SKYLAKE(dev_priv)) 395 if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
357 clean_virtual_dp_monitor(vgpu, PORT_D); 396 clean_virtual_dp_monitor(vgpu, PORT_D);
358 else 397 else
359 clean_virtual_dp_monitor(vgpu, PORT_B); 398 clean_virtual_dp_monitor(vgpu, PORT_B);
@@ -375,7 +414,7 @@ int intel_vgpu_init_display(struct intel_vgpu *vgpu, u64 resolution)
375 414
376 intel_vgpu_init_i2c_edid(vgpu); 415 intel_vgpu_init_i2c_edid(vgpu);
377 416
378 if (IS_SKYLAKE(dev_priv)) 417 if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
379 return setup_virtual_dp_monitor(vgpu, PORT_D, GVT_DP_D, 418 return setup_virtual_dp_monitor(vgpu, PORT_D, GVT_DP_D,
380 resolution); 419 resolution);
381 else 420 else
diff --git a/drivers/gpu/drm/i915/gvt/edid.c b/drivers/gpu/drm/i915/gvt/edid.c
index f1648fe5e5ea..42cd09ec63fa 100644
--- a/drivers/gpu/drm/i915/gvt/edid.c
+++ b/drivers/gpu/drm/i915/gvt/edid.c
@@ -495,7 +495,8 @@ void intel_gvt_i2c_handle_aux_ch_write(struct intel_vgpu *vgpu,
495 unsigned char val = edid_get_byte(vgpu); 495 unsigned char val = edid_get_byte(vgpu);
496 496
497 aux_data_for_write = (val << 16); 497 aux_data_for_write = (val << 16);
498 } 498 } else
499 aux_data_for_write = (0xff << 16);
499 } 500 }
500 /* write the return value in AUX_CH_DATA reg which includes: 501 /* write the return value in AUX_CH_DATA reg which includes:
501 * ACK of I2C_WRITE 502 * ACK of I2C_WRITE
diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c
index f1f426a97aa9..dca989eb2d42 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.c
+++ b/drivers/gpu/drm/i915/gvt/execlist.c
@@ -56,8 +56,8 @@ static int context_switch_events[] = {
56 56
57static int ring_id_to_context_switch_event(int ring_id) 57static int ring_id_to_context_switch_event(int ring_id)
58{ 58{
59 if (WARN_ON(ring_id < RCS && ring_id > 59 if (WARN_ON(ring_id < RCS ||
60 ARRAY_SIZE(context_switch_events))) 60 ring_id >= ARRAY_SIZE(context_switch_events)))
61 return -EINVAL; 61 return -EINVAL;
62 62
63 return context_switch_events[ring_id]; 63 return context_switch_events[ring_id];
@@ -394,9 +394,11 @@ static void prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
394 394
395static int update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx) 395static int update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
396{ 396{
397 int ring_id = wa_ctx->workload->ring_id; 397 struct intel_vgpu_workload *workload = container_of(wa_ctx,
398 struct i915_gem_context *shadow_ctx = 398 struct intel_vgpu_workload,
399 wa_ctx->workload->vgpu->shadow_ctx; 399 wa_ctx);
400 int ring_id = workload->ring_id;
401 struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
400 struct drm_i915_gem_object *ctx_obj = 402 struct drm_i915_gem_object *ctx_obj =
401 shadow_ctx->engine[ring_id].state->obj; 403 shadow_ctx->engine[ring_id].state->obj;
402 struct execlist_ring_context *shadow_ring_context; 404 struct execlist_ring_context *shadow_ring_context;
@@ -680,15 +682,12 @@ static int submit_context(struct intel_vgpu *vgpu, int ring_id,
680 CACHELINE_BYTES; 682 CACHELINE_BYTES;
681 workload->wa_ctx.per_ctx.guest_gma = 683 workload->wa_ctx.per_ctx.guest_gma =
682 per_ctx & PER_CTX_ADDR_MASK; 684 per_ctx & PER_CTX_ADDR_MASK;
683 workload->wa_ctx.workload = workload;
684 685
685 WARN_ON(workload->wa_ctx.indirect_ctx.size && !(per_ctx & 0x1)); 686 WARN_ON(workload->wa_ctx.indirect_ctx.size && !(per_ctx & 0x1));
686 } 687 }
687 688
688 if (emulate_schedule_in) 689 if (emulate_schedule_in)
689 memcpy(&workload->elsp_dwords, 690 workload->elsp_dwords = vgpu->execlist[ring_id].elsp_dwords;
690 &vgpu->execlist[ring_id].elsp_dwords,
691 sizeof(workload->elsp_dwords));
692 691
693 gvt_dbg_el("workload %p ring id %d head %x tail %x start %x ctl %x\n", 692 gvt_dbg_el("workload %p ring id %d head %x tail %x start %x ctl %x\n",
694 workload, ring_id, head, tail, start, ctl); 693 workload, ring_id, head, tail, start, ctl);
@@ -775,7 +774,8 @@ static void init_vgpu_execlist(struct intel_vgpu *vgpu, int ring_id)
775 _EL_OFFSET_STATUS_PTR); 774 _EL_OFFSET_STATUS_PTR);
776 775
777 ctx_status_ptr.dw = vgpu_vreg(vgpu, ctx_status_ptr_reg); 776 ctx_status_ptr.dw = vgpu_vreg(vgpu, ctx_status_ptr_reg);
778 ctx_status_ptr.read_ptr = ctx_status_ptr.write_ptr = 0x7; 777 ctx_status_ptr.read_ptr = 0;
778 ctx_status_ptr.write_ptr = 0x7;
779 vgpu_vreg(vgpu, ctx_status_ptr_reg) = ctx_status_ptr.dw; 779 vgpu_vreg(vgpu, ctx_status_ptr_reg) = ctx_status_ptr.dw;
780} 780}
781 781
diff --git a/drivers/gpu/drm/i915/gvt/firmware.c b/drivers/gpu/drm/i915/gvt/firmware.c
index 933a7c211a1c..dce8d15f706f 100644
--- a/drivers/gpu/drm/i915/gvt/firmware.c
+++ b/drivers/gpu/drm/i915/gvt/firmware.c
@@ -75,11 +75,11 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt)
75 struct gvt_firmware_header *h; 75 struct gvt_firmware_header *h;
76 void *firmware; 76 void *firmware;
77 void *p; 77 void *p;
78 unsigned long size; 78 unsigned long size, crc32_start;
79 int i; 79 int i;
80 int ret; 80 int ret;
81 81
82 size = sizeof(*h) + info->mmio_size + info->cfg_space_size - 1; 82 size = sizeof(*h) + info->mmio_size + info->cfg_space_size;
83 firmware = vzalloc(size); 83 firmware = vzalloc(size);
84 if (!firmware) 84 if (!firmware)
85 return -ENOMEM; 85 return -ENOMEM;
@@ -112,6 +112,9 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt)
112 112
113 memcpy(gvt->firmware.mmio, p, info->mmio_size); 113 memcpy(gvt->firmware.mmio, p, info->mmio_size);
114 114
115 crc32_start = offsetof(struct gvt_firmware_header, crc32) + 4;
116 h->crc32 = crc32_le(0, firmware + crc32_start, size - crc32_start);
117
115 firmware_attr.size = size; 118 firmware_attr.size = size;
116 firmware_attr.private = firmware; 119 firmware_attr.private = firmware;
117 120
@@ -234,7 +237,7 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt)
234 237
235 firmware->mmio = mem; 238 firmware->mmio = mem;
236 239
237 sprintf(path, "%s/vid_0x%04x_did_0x%04x_rid_0x%04x.golden_hw_state", 240 sprintf(path, "%s/vid_0x%04x_did_0x%04x_rid_0x%02x.golden_hw_state",
238 GVT_FIRMWARE_PATH, pdev->vendor, pdev->device, 241 GVT_FIRMWARE_PATH, pdev->vendor, pdev->device,
239 pdev->revision); 242 pdev->revision);
240 243
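
expose_firmware_sysfs now stamps h->crc32 over everything that follows the crc32 field, and the size calculation drops a stray "- 1". A userspace sketch of checksumming an image while skipping the header bytes up to and including the crc field, using zlib's crc32 as a stand-in for the kernel's crc32_le (struct header is hypothetical; build with -lz):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <zlib.h>

struct header {
        uint32_t magic;
        uint32_t crc32;                 /* covers the bytes after it */
        uint8_t  payload[32];
};

int main(void)
{
        struct header h;
        size_t start = offsetof(struct header, crc32) + sizeof(h.crc32);

        memset(&h, 0, sizeof(h));
        memcpy(h.payload, "golden hw state", 15);

        /* checksum starts right after the crc field itself */
        h.crc32 = crc32(0L, (const unsigned char *)&h + start,
                        sizeof(h) - start);
        printf("crc32=0x%08x\n", (unsigned int)h.crc32);
        return 0;
}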
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index da7312715824..c6f0077f590d 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -1837,11 +1837,15 @@ static int emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
1837 ret = gtt_entry_p2m(vgpu, &e, &m); 1837 ret = gtt_entry_p2m(vgpu, &e, &m);
1838 if (ret) { 1838 if (ret) {
1839 gvt_vgpu_err("fail to translate guest gtt entry\n"); 1839 gvt_vgpu_err("fail to translate guest gtt entry\n");
1840 return ret; 1840 /* guest driver may read/write the entry during a
1841 * partial update; in this situation p2m will fail,
1842 * so set the shadow entry to point to a scratch page
1843 */
1844 ops->set_pfn(&m, gvt->gtt.scratch_ggtt_mfn);
1841 } 1845 }
1842 } else { 1846 } else {
1843 m = e; 1847 m = e;
1844 m.val64 = 0; 1848 ops->set_pfn(&m, gvt->gtt.scratch_ggtt_mfn);
1845 } 1849 }
1846 1850
1847 ggtt_set_shadow_entry(ggtt_mm, &m, g_gtt_index); 1851 ggtt_set_shadow_entry(ggtt_mm, &m, g_gtt_index);
@@ -2220,7 +2224,8 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
2220 2224
2221 gvt_dbg_core("init gtt\n"); 2225 gvt_dbg_core("init gtt\n");
2222 2226
2223 if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)) { 2227 if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)
2228 || IS_KABYLAKE(gvt->dev_priv)) {
2224 gvt->gtt.pte_ops = &gen8_gtt_pte_ops; 2229 gvt->gtt.pte_ops = &gen8_gtt_pte_ops;
2225 gvt->gtt.gma_ops = &gen8_gtt_gma_ops; 2230 gvt->gtt.gma_ops = &gen8_gtt_gma_ops;
2226 gvt->gtt.mm_alloc_page_table = gen8_mm_alloc_page_table; 2231 gvt->gtt.mm_alloc_page_table = gen8_mm_alloc_page_table;
@@ -2289,12 +2294,15 @@ void intel_gvt_clean_gtt(struct intel_gvt *gvt)
2289void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu) 2294void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu)
2290{ 2295{
2291 struct intel_gvt *gvt = vgpu->gvt; 2296 struct intel_gvt *gvt = vgpu->gvt;
2297 struct drm_i915_private *dev_priv = gvt->dev_priv;
2292 struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; 2298 struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
2293 u32 index; 2299 u32 index;
2294 u32 offset; 2300 u32 offset;
2295 u32 num_entries; 2301 u32 num_entries;
2296 struct intel_gvt_gtt_entry e; 2302 struct intel_gvt_gtt_entry e;
2297 2303
2304 intel_runtime_pm_get(dev_priv);
2305
2298 memset(&e, 0, sizeof(struct intel_gvt_gtt_entry)); 2306 memset(&e, 0, sizeof(struct intel_gvt_gtt_entry));
2299 e.type = GTT_TYPE_GGTT_PTE; 2307 e.type = GTT_TYPE_GGTT_PTE;
2300 ops->set_pfn(&e, gvt->gtt.scratch_ggtt_mfn); 2308 ops->set_pfn(&e, gvt->gtt.scratch_ggtt_mfn);
@@ -2309,6 +2317,8 @@ void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu)
2309 num_entries = vgpu_hidden_sz(vgpu) >> PAGE_SHIFT; 2317 num_entries = vgpu_hidden_sz(vgpu) >> PAGE_SHIFT;
2310 for (offset = 0; offset < num_entries; offset++) 2318 for (offset = 0; offset < num_entries; offset++)
2311 ops->set_entry(NULL, &e, index + offset, false, 0, vgpu); 2319 ops->set_entry(NULL, &e, index + offset, false, 0, vgpu);
2320
2321 intel_runtime_pm_put(dev_priv);
2312} 2322}
2313 2323
2314/** 2324/**
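
intel_vgpu_reset_ggtt now brackets its GGTT writes with intel_runtime_pm_get/_put so the hardware is awake for the MMIO traffic. The generic shape of that fix, shown with the driver-neutral runtime-PM API rather than i915's wrappers (kernel context assumed, reset_regs is hypothetical):

#include <linux/io.h>
#include <linux/pm_runtime.h>

static void reset_regs(struct device *dev, void __iomem *mmio, int n)
{
        int i;

        pm_runtime_get_sync(dev);       /* resume if suspended, hold a ref */

        for (i = 0; i < n; i++)
                writel(0, mmio + 4 * i);        /* scratch-entry stand-in */

        pm_runtime_put(dev);            /* allow autosuspend again */
}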
diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
index 3b9d59e457ba..7dea5e5d5567 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.c
+++ b/drivers/gpu/drm/i915/gvt/gvt.c
@@ -52,6 +52,8 @@ static const struct intel_gvt_ops intel_gvt_ops = {
52 .vgpu_create = intel_gvt_create_vgpu, 52 .vgpu_create = intel_gvt_create_vgpu,
53 .vgpu_destroy = intel_gvt_destroy_vgpu, 53 .vgpu_destroy = intel_gvt_destroy_vgpu,
54 .vgpu_reset = intel_gvt_reset_vgpu, 54 .vgpu_reset = intel_gvt_reset_vgpu,
55 .vgpu_activate = intel_gvt_activate_vgpu,
56 .vgpu_deactivate = intel_gvt_deactivate_vgpu,
55}; 57};
56 58
57/** 59/**
@@ -106,7 +108,8 @@ static void init_device_info(struct intel_gvt *gvt)
106 struct intel_gvt_device_info *info = &gvt->device_info; 108 struct intel_gvt_device_info *info = &gvt->device_info;
107 struct pci_dev *pdev = gvt->dev_priv->drm.pdev; 109 struct pci_dev *pdev = gvt->dev_priv->drm.pdev;
108 110
109 if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)) { 111 if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)
112 || IS_KABYLAKE(gvt->dev_priv)) {
110 info->max_support_vgpus = 8; 113 info->max_support_vgpus = 8;
111 info->cfg_space_size = 256; 114 info->cfg_space_size = 256;
112 info->mmio_size = 2 * 1024 * 1024; 115 info->mmio_size = 2 * 1024 * 1024;
@@ -143,6 +146,11 @@ static int gvt_service_thread(void *data)
143 intel_gvt_emulate_vblank(gvt); 146 intel_gvt_emulate_vblank(gvt);
144 mutex_unlock(&gvt->lock); 147 mutex_unlock(&gvt->lock);
145 } 148 }
149
150 if (test_and_clear_bit(INTEL_GVT_REQUEST_SCHED,
151 (void *)&gvt->service_request)) {
152 intel_gvt_schedule(gvt);
153 }
146 } 154 }
147 155
148 return 0; 156 return 0;
@@ -196,6 +204,8 @@ void intel_gvt_clean_device(struct drm_i915_private *dev_priv)
196 204
197 idr_destroy(&gvt->vgpu_idr); 205 idr_destroy(&gvt->vgpu_idr);
198 206
207 intel_gvt_destroy_idle_vgpu(gvt->idle_vgpu);
208
199 kfree(dev_priv->gvt); 209 kfree(dev_priv->gvt);
200 dev_priv->gvt = NULL; 210 dev_priv->gvt = NULL;
201} 211}
@@ -214,6 +224,7 @@ void intel_gvt_clean_device(struct drm_i915_private *dev_priv)
214int intel_gvt_init_device(struct drm_i915_private *dev_priv) 224int intel_gvt_init_device(struct drm_i915_private *dev_priv)
215{ 225{
216 struct intel_gvt *gvt; 226 struct intel_gvt *gvt;
227 struct intel_vgpu *vgpu;
217 int ret; 228 int ret;
218 229
219 /* 230 /*
@@ -286,6 +297,14 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
286 goto out_clean_types; 297 goto out_clean_types;
287 } 298 }
288 299
300 vgpu = intel_gvt_create_idle_vgpu(gvt);
301 if (IS_ERR(vgpu)) {
302 ret = PTR_ERR(vgpu);
303 gvt_err("failed to create idle vgpu\n");
304 goto out_clean_types;
305 }
306 gvt->idle_vgpu = vgpu;
307
289 gvt_dbg_core("gvt device initialization is done\n"); 308 gvt_dbg_core("gvt device initialization is done\n");
290 dev_priv->gvt = gvt; 309 dev_priv->gvt = gvt;
291 return 0; 310 return 0;
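
The service thread gains a second request bit, INTEL_GVT_REQUEST_SCHED, consumed with test_and_clear_bit() so each posted request is acted on at most once. A runnable userspace analog of that request-bit pattern using C11 atomics (REQ_* names invented):

#include <stdatomic.h>
#include <stdio.h>

enum { REQ_VBLANK = 1u << 0, REQ_SCHED = 1u << 1 };

static atomic_uint service_request;

static void request(unsigned int bit)
{
        atomic_fetch_or(&service_request, bit); /* producer posts a request */
}

static int test_and_clear(unsigned int bit)
{
        /* consume the request atomically; true only for the first caller */
        return !!(atomic_fetch_and(&service_request, ~bit) & bit);
}

int main(void)
{
        request(REQ_SCHED);
        printf("%d %d\n", test_and_clear(REQ_SCHED),
               test_and_clear(REQ_SCHED));      /* prints: 1 0 */
        return 0;
}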
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 6dfc48b63b71..930732e5c780 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -138,6 +138,10 @@ struct intel_vgpu_display {
138 struct intel_vgpu_sbi sbi; 138 struct intel_vgpu_sbi sbi;
139}; 139};
140 140
141struct vgpu_sched_ctl {
142 int weight;
143};
144
141struct intel_vgpu { 145struct intel_vgpu {
142 struct intel_gvt *gvt; 146 struct intel_gvt *gvt;
143 int id; 147 int id;
@@ -147,6 +151,7 @@ struct intel_vgpu {
147 bool failsafe; 151 bool failsafe;
148 bool resetting; 152 bool resetting;
149 void *sched_data; 153 void *sched_data;
154 struct vgpu_sched_ctl sched_ctl;
150 155
151 struct intel_vgpu_fence fence; 156 struct intel_vgpu_fence fence;
152 struct intel_vgpu_gm gm; 157 struct intel_vgpu_gm gm;
@@ -160,6 +165,7 @@ struct intel_vgpu {
160 struct list_head workload_q_head[I915_NUM_ENGINES]; 165 struct list_head workload_q_head[I915_NUM_ENGINES];
161 struct kmem_cache *workloads; 166 struct kmem_cache *workloads;
162 atomic_t running_workload_num; 167 atomic_t running_workload_num;
168 ktime_t last_ctx_submit_time;
163 DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES); 169 DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES);
164 struct i915_gem_context *shadow_ctx; 170 struct i915_gem_context *shadow_ctx;
165 171
@@ -215,6 +221,7 @@ struct intel_vgpu_type {
215 unsigned int low_gm_size; 221 unsigned int low_gm_size;
216 unsigned int high_gm_size; 222 unsigned int high_gm_size;
217 unsigned int fence; 223 unsigned int fence;
224 unsigned int weight;
218 enum intel_vgpu_edid resolution; 225 enum intel_vgpu_edid resolution;
219}; 226};
220 227
@@ -236,6 +243,7 @@ struct intel_gvt {
236 DECLARE_HASHTABLE(cmd_table, GVT_CMD_HASH_BITS); 243 DECLARE_HASHTABLE(cmd_table, GVT_CMD_HASH_BITS);
237 struct intel_vgpu_type *types; 244 struct intel_vgpu_type *types;
238 unsigned int num_types; 245 unsigned int num_types;
246 struct intel_vgpu *idle_vgpu;
239 247
240 struct task_struct *service_thread; 248 struct task_struct *service_thread;
241 wait_queue_head_t service_thread_wq; 249 wait_queue_head_t service_thread_wq;
@@ -249,6 +257,7 @@ static inline struct intel_gvt *to_gvt(struct drm_i915_private *i915)
249 257
250enum { 258enum {
251 INTEL_GVT_REQUEST_EMULATE_VBLANK = 0, 259 INTEL_GVT_REQUEST_EMULATE_VBLANK = 0,
260 INTEL_GVT_REQUEST_SCHED = 1,
252}; 261};
253 262
254static inline void intel_gvt_request_service(struct intel_gvt *gvt, 263static inline void intel_gvt_request_service(struct intel_gvt *gvt,
@@ -322,6 +331,8 @@ struct intel_vgpu_creation_params {
322 __u64 resolution; 331 __u64 resolution;
323 __s32 primary; 332 __s32 primary;
324 __u64 vgpu_id; 333 __u64 vgpu_id;
334
335 __u32 weight;
325}; 336};
326 337
327int intel_vgpu_alloc_resource(struct intel_vgpu *vgpu, 338int intel_vgpu_alloc_resource(struct intel_vgpu *vgpu,
@@ -376,13 +387,16 @@ static inline void intel_vgpu_write_pci_bar(struct intel_vgpu *vgpu,
376int intel_gvt_init_vgpu_types(struct intel_gvt *gvt); 387int intel_gvt_init_vgpu_types(struct intel_gvt *gvt);
377void intel_gvt_clean_vgpu_types(struct intel_gvt *gvt); 388void intel_gvt_clean_vgpu_types(struct intel_gvt *gvt);
378 389
390struct intel_vgpu *intel_gvt_create_idle_vgpu(struct intel_gvt *gvt);
391void intel_gvt_destroy_idle_vgpu(struct intel_vgpu *vgpu);
379struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt, 392struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
380 struct intel_vgpu_type *type); 393 struct intel_vgpu_type *type);
381void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu); 394void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu);
382void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr, 395void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
383 unsigned int engine_mask); 396 unsigned int engine_mask);
384void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu); 397void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu);
385 398void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu);
399void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu);
386 400
387/* validating GM functions */ 401/* validating GM functions */
388#define vgpu_gmadr_is_aperture(vgpu, gmadr) \ 402#define vgpu_gmadr_is_aperture(vgpu, gmadr) \
@@ -449,6 +463,8 @@ struct intel_gvt_ops {
449 struct intel_vgpu_type *); 463 struct intel_vgpu_type *);
450 void (*vgpu_destroy)(struct intel_vgpu *); 464 void (*vgpu_destroy)(struct intel_vgpu *);
451 void (*vgpu_reset)(struct intel_vgpu *); 465 void (*vgpu_reset)(struct intel_vgpu *);
466 void (*vgpu_activate)(struct intel_vgpu *);
467 void (*vgpu_deactivate)(struct intel_vgpu *);
452}; 468};
453 469
454 470
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index eaff45d417e8..0ad1a508e2af 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -68,6 +68,8 @@ unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt)
68 return D_BDW; 68 return D_BDW;
69 else if (IS_SKYLAKE(gvt->dev_priv)) 69 else if (IS_SKYLAKE(gvt->dev_priv))
70 return D_SKL; 70 return D_SKL;
71 else if (IS_KABYLAKE(gvt->dev_priv))
72 return D_KBL;
71 73
72 return 0; 74 return 0;
73} 75}
@@ -234,7 +236,8 @@ static int mul_force_wake_write(struct intel_vgpu *vgpu,
234 old = vgpu_vreg(vgpu, offset); 236 old = vgpu_vreg(vgpu, offset);
235 new = CALC_MODE_MASK_REG(old, *(u32 *)p_data); 237 new = CALC_MODE_MASK_REG(old, *(u32 *)p_data);
236 238
237 if (IS_SKYLAKE(vgpu->gvt->dev_priv)) { 239 if (IS_SKYLAKE(vgpu->gvt->dev_priv)
240 || IS_KABYLAKE(vgpu->gvt->dev_priv)) {
238 switch (offset) { 241 switch (offset) {
239 case FORCEWAKE_RENDER_GEN9_REG: 242 case FORCEWAKE_RENDER_GEN9_REG:
240 ack_reg_offset = FORCEWAKE_ACK_RENDER_GEN9_REG; 243 ack_reg_offset = FORCEWAKE_ACK_RENDER_GEN9_REG;
@@ -823,8 +826,9 @@ static int dp_aux_ch_ctl_mmio_write(struct intel_vgpu *vgpu,
823 write_vreg(vgpu, offset, p_data, bytes); 826 write_vreg(vgpu, offset, p_data, bytes);
824 data = vgpu_vreg(vgpu, offset); 827 data = vgpu_vreg(vgpu, offset);
825 828
826 if (IS_SKYLAKE(vgpu->gvt->dev_priv) && 829 if ((IS_SKYLAKE(vgpu->gvt->dev_priv)
827 offset != _REG_SKL_DP_AUX_CH_CTL(port_index)) { 830 || IS_KABYLAKE(vgpu->gvt->dev_priv))
831 && offset != _REG_SKL_DP_AUX_CH_CTL(port_index)) {
828 /* SKL DPB/C/D aux ctl register changed */ 832 /* SKL DPB/C/D aux ctl register changed */
829 return 0; 833 return 0;
830 } else if (IS_BROADWELL(vgpu->gvt->dev_priv) && 834 } else if (IS_BROADWELL(vgpu->gvt->dev_priv) &&
@@ -970,6 +974,14 @@ static int dp_aux_ch_ctl_mmio_write(struct intel_vgpu *vgpu,
970 return 0; 974 return 0;
971} 975}
972 976
977static int mbctl_write(struct intel_vgpu *vgpu, unsigned int offset,
978 void *p_data, unsigned int bytes)
979{
980 *(u32 *)p_data &= (~GEN6_MBCTL_ENABLE_BOOT_FETCH);
981 write_vreg(vgpu, offset, p_data, bytes);
982 return 0;
983}
984
973static int vga_control_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, 985static int vga_control_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
974 void *p_data, unsigned int bytes) 986 void *p_data, unsigned int bytes)
975{ 987{
@@ -1303,7 +1315,8 @@ static int mailbox_write(struct intel_vgpu *vgpu, unsigned int offset,
1303 1315
1304 switch (cmd) { 1316 switch (cmd) {
1305 case GEN9_PCODE_READ_MEM_LATENCY: 1317 case GEN9_PCODE_READ_MEM_LATENCY:
1306 if (IS_SKYLAKE(vgpu->gvt->dev_priv)) { 1318 if (IS_SKYLAKE(vgpu->gvt->dev_priv)
1319 || IS_KABYLAKE(vgpu->gvt->dev_priv)) {
1307 /** 1320 /**
1308 * "Read memory latency" command on gen9. 1321 * "Read memory latency" command on gen9.
1309 * Below memory latency values are read 1322 * Below memory latency values are read
@@ -1316,7 +1329,8 @@ static int mailbox_write(struct intel_vgpu *vgpu, unsigned int offset,
1316 } 1329 }
1317 break; 1330 break;
1318 case SKL_PCODE_CDCLK_CONTROL: 1331 case SKL_PCODE_CDCLK_CONTROL:
1319 if (IS_SKYLAKE(vgpu->gvt->dev_priv)) 1332 if (IS_SKYLAKE(vgpu->gvt->dev_priv)
1333 || IS_KABYLAKE(vgpu->gvt->dev_priv))
1320 *data0 = SKL_CDCLK_READY_FOR_CHANGE; 1334 *data0 = SKL_CDCLK_READY_FOR_CHANGE;
1321 break; 1335 break;
1322 case GEN6_PCODE_READ_RC6VIDS: 1336 case GEN6_PCODE_READ_RC6VIDS:
@@ -1410,6 +1424,7 @@ static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
1410 1424
1411 execlist->elsp_dwords.data[execlist->elsp_dwords.index] = data; 1425 execlist->elsp_dwords.data[execlist->elsp_dwords.index] = data;
1412 if (execlist->elsp_dwords.index == 3) { 1426 if (execlist->elsp_dwords.index == 3) {
1427 vgpu->last_ctx_submit_time = ktime_get();
1413 ret = intel_vgpu_submit_execlist(vgpu, ring_id); 1428 ret = intel_vgpu_submit_execlist(vgpu, ring_id);
1414 if(ret) 1429 if(ret)
1415 gvt_vgpu_err("fail submit workload on ring %d\n", 1430 gvt_vgpu_err("fail submit workload on ring %d\n",
@@ -2238,7 +2253,7 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
2238 MMIO_D(0x7180, D_ALL); 2253 MMIO_D(0x7180, D_ALL);
2239 MMIO_D(0x7408, D_ALL); 2254 MMIO_D(0x7408, D_ALL);
2240 MMIO_D(0x7c00, D_ALL); 2255 MMIO_D(0x7c00, D_ALL);
2241 MMIO_D(GEN6_MBCTL, D_ALL); 2256 MMIO_DH(GEN6_MBCTL, D_ALL, NULL, mbctl_write);
2242 MMIO_D(0x911c, D_ALL); 2257 MMIO_D(0x911c, D_ALL);
2243 MMIO_D(0x9120, D_ALL); 2258 MMIO_D(0x9120, D_ALL);
2244 MMIO_DFH(GEN7_UCGCTL4, D_ALL, F_CMD_ACCESS, NULL, NULL); 2259 MMIO_DFH(GEN7_UCGCTL4, D_ALL, F_CMD_ACCESS, NULL, NULL);
@@ -2584,219 +2599,232 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
2584 MMIO_DH(FORCEWAKE_MEDIA_GEN9, D_SKL_PLUS, NULL, mul_force_wake_write); 2599 MMIO_DH(FORCEWAKE_MEDIA_GEN9, D_SKL_PLUS, NULL, mul_force_wake_write);
2585 MMIO_DH(FORCEWAKE_ACK_MEDIA_GEN9, D_SKL_PLUS, NULL, NULL); 2600 MMIO_DH(FORCEWAKE_ACK_MEDIA_GEN9, D_SKL_PLUS, NULL, NULL);
2586 2601
2587 MMIO_F(_DPB_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_SKL, NULL, dp_aux_ch_ctl_mmio_write); 2602 MMIO_F(_DPB_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL,
2588 MMIO_F(_DPC_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_SKL, NULL, dp_aux_ch_ctl_mmio_write); 2603 dp_aux_ch_ctl_mmio_write);
2589 MMIO_F(_DPD_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_SKL, NULL, dp_aux_ch_ctl_mmio_write); 2604 MMIO_F(_DPC_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL,
2605 dp_aux_ch_ctl_mmio_write);
2606 MMIO_F(_DPD_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL,
+		dp_aux_ch_ctl_mmio_write);
 
-	MMIO_D(HSW_PWR_WELL_BIOS, D_SKL);
-	MMIO_DH(HSW_PWR_WELL_DRIVER, D_SKL, NULL, skl_power_well_ctl_write);
+	MMIO_D(HSW_PWR_WELL_BIOS, D_SKL_PLUS);
+	MMIO_DH(HSW_PWR_WELL_DRIVER, D_SKL_PLUS, NULL,
+		skl_power_well_ctl_write);
+	MMIO_DH(GEN6_PCODE_MAILBOX, D_SKL_PLUS, NULL, mailbox_write);
 
 	MMIO_D(0xa210, D_SKL_PLUS);
 	MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
 	MMIO_D(GEN9_RENDER_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
 	MMIO_DFH(GEN9_GAMT_ECO_REG_RW_IA, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
-	MMIO_DH(0x4ddc, D_SKL, NULL, skl_misc_ctl_write);
-	MMIO_DH(0x42080, D_SKL, NULL, skl_misc_ctl_write);
-	MMIO_D(0x45504, D_SKL);
-	MMIO_D(0x45520, D_SKL);
-	MMIO_D(0x46000, D_SKL);
-	MMIO_DH(0x46010, D_SKL, NULL, skl_lcpll_write);
-	MMIO_DH(0x46014, D_SKL, NULL, skl_lcpll_write);
-	MMIO_D(0x6C040, D_SKL);
-	MMIO_D(0x6C048, D_SKL);
-	MMIO_D(0x6C050, D_SKL);
-	MMIO_D(0x6C044, D_SKL);
-	MMIO_D(0x6C04C, D_SKL);
-	MMIO_D(0x6C054, D_SKL);
-	MMIO_D(0x6c058, D_SKL);
-	MMIO_D(0x6c05c, D_SKL);
-	MMIO_DH(0X6c060, D_SKL, dpll_status_read, NULL);
+	MMIO_DH(0x4ddc, D_SKL_PLUS, NULL, skl_misc_ctl_write);
+	MMIO_DH(0x42080, D_SKL_PLUS, NULL, skl_misc_ctl_write);
+	MMIO_D(0x45504, D_SKL_PLUS);
+	MMIO_D(0x45520, D_SKL_PLUS);
+	MMIO_D(0x46000, D_SKL_PLUS);
+	MMIO_DH(0x46010, D_SKL | D_KBL, NULL, skl_lcpll_write);
+	MMIO_DH(0x46014, D_SKL | D_KBL, NULL, skl_lcpll_write);
+	MMIO_D(0x6C040, D_SKL | D_KBL);
+	MMIO_D(0x6C048, D_SKL | D_KBL);
+	MMIO_D(0x6C050, D_SKL | D_KBL);
+	MMIO_D(0x6C044, D_SKL | D_KBL);
+	MMIO_D(0x6C04C, D_SKL | D_KBL);
+	MMIO_D(0x6C054, D_SKL | D_KBL);
+	MMIO_D(0x6c058, D_SKL | D_KBL);
+	MMIO_D(0x6c05c, D_SKL | D_KBL);
+	MMIO_DH(0X6c060, D_SKL | D_KBL, dpll_status_read, NULL);
 
-	MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 0), D_SKL, NULL, pf_write);
-	MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 1), D_SKL, NULL, pf_write);
-	MMIO_DH(SKL_PS_WIN_POS(PIPE_B, 0), D_SKL, NULL, pf_write);
-	MMIO_DH(SKL_PS_WIN_POS(PIPE_B, 1), D_SKL, NULL, pf_write);
-	MMIO_DH(SKL_PS_WIN_POS(PIPE_C, 0), D_SKL, NULL, pf_write);
-	MMIO_DH(SKL_PS_WIN_POS(PIPE_C, 1), D_SKL, NULL, pf_write);
+	MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 0), D_SKL_PLUS, NULL, pf_write);
+	MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 1), D_SKL_PLUS, NULL, pf_write);
+	MMIO_DH(SKL_PS_WIN_POS(PIPE_B, 0), D_SKL_PLUS, NULL, pf_write);
+	MMIO_DH(SKL_PS_WIN_POS(PIPE_B, 1), D_SKL_PLUS, NULL, pf_write);
+	MMIO_DH(SKL_PS_WIN_POS(PIPE_C, 0), D_SKL_PLUS, NULL, pf_write);
+	MMIO_DH(SKL_PS_WIN_POS(PIPE_C, 1), D_SKL_PLUS, NULL, pf_write);
 
-	MMIO_DH(SKL_PS_WIN_SZ(PIPE_A, 0), D_SKL, NULL, pf_write);
-	MMIO_DH(SKL_PS_WIN_SZ(PIPE_A, 1), D_SKL, NULL, pf_write);
-	MMIO_DH(SKL_PS_WIN_SZ(PIPE_B, 0), D_SKL, NULL, pf_write);
-	MMIO_DH(SKL_PS_WIN_SZ(PIPE_B, 1), D_SKL, NULL, pf_write);
-	MMIO_DH(SKL_PS_WIN_SZ(PIPE_C, 0), D_SKL, NULL, pf_write);
-	MMIO_DH(SKL_PS_WIN_SZ(PIPE_C, 1), D_SKL, NULL, pf_write);
+	MMIO_DH(SKL_PS_WIN_SZ(PIPE_A, 0), D_SKL_PLUS, NULL, pf_write);
+	MMIO_DH(SKL_PS_WIN_SZ(PIPE_A, 1), D_SKL_PLUS, NULL, pf_write);
+	MMIO_DH(SKL_PS_WIN_SZ(PIPE_B, 0), D_SKL_PLUS, NULL, pf_write);
+	MMIO_DH(SKL_PS_WIN_SZ(PIPE_B, 1), D_SKL_PLUS, NULL, pf_write);
+	MMIO_DH(SKL_PS_WIN_SZ(PIPE_C, 0), D_SKL_PLUS, NULL, pf_write);
+	MMIO_DH(SKL_PS_WIN_SZ(PIPE_C, 1), D_SKL_PLUS, NULL, pf_write);
 
-	MMIO_DH(SKL_PS_CTRL(PIPE_A, 0), D_SKL, NULL, pf_write);
-	MMIO_DH(SKL_PS_CTRL(PIPE_A, 1), D_SKL, NULL, pf_write);
-	MMIO_DH(SKL_PS_CTRL(PIPE_B, 0), D_SKL, NULL, pf_write);
-	MMIO_DH(SKL_PS_CTRL(PIPE_B, 1), D_SKL, NULL, pf_write);
-	MMIO_DH(SKL_PS_CTRL(PIPE_C, 0), D_SKL, NULL, pf_write);
-	MMIO_DH(SKL_PS_CTRL(PIPE_C, 1), D_SKL, NULL, pf_write);
+	MMIO_DH(SKL_PS_CTRL(PIPE_A, 0), D_SKL_PLUS, NULL, pf_write);
+	MMIO_DH(SKL_PS_CTRL(PIPE_A, 1), D_SKL_PLUS, NULL, pf_write);
+	MMIO_DH(SKL_PS_CTRL(PIPE_B, 0), D_SKL_PLUS, NULL, pf_write);
+	MMIO_DH(SKL_PS_CTRL(PIPE_B, 1), D_SKL_PLUS, NULL, pf_write);
+	MMIO_DH(SKL_PS_CTRL(PIPE_C, 0), D_SKL_PLUS, NULL, pf_write);
+	MMIO_DH(SKL_PS_CTRL(PIPE_C, 1), D_SKL_PLUS, NULL, pf_write);
 
-	MMIO_DH(PLANE_BUF_CFG(PIPE_A, 0), D_SKL, NULL, NULL);
-	MMIO_DH(PLANE_BUF_CFG(PIPE_A, 1), D_SKL, NULL, NULL);
-	MMIO_DH(PLANE_BUF_CFG(PIPE_A, 2), D_SKL, NULL, NULL);
-	MMIO_DH(PLANE_BUF_CFG(PIPE_A, 3), D_SKL, NULL, NULL);
+	MMIO_DH(PLANE_BUF_CFG(PIPE_A, 0), D_SKL_PLUS, NULL, NULL);
+	MMIO_DH(PLANE_BUF_CFG(PIPE_A, 1), D_SKL_PLUS, NULL, NULL);
+	MMIO_DH(PLANE_BUF_CFG(PIPE_A, 2), D_SKL_PLUS, NULL, NULL);
+	MMIO_DH(PLANE_BUF_CFG(PIPE_A, 3), D_SKL_PLUS, NULL, NULL);
 
-	MMIO_DH(PLANE_BUF_CFG(PIPE_B, 0), D_SKL, NULL, NULL);
-	MMIO_DH(PLANE_BUF_CFG(PIPE_B, 1), D_SKL, NULL, NULL);
-	MMIO_DH(PLANE_BUF_CFG(PIPE_B, 2), D_SKL, NULL, NULL);
-	MMIO_DH(PLANE_BUF_CFG(PIPE_B, 3), D_SKL, NULL, NULL);
+	MMIO_DH(PLANE_BUF_CFG(PIPE_B, 0), D_SKL_PLUS, NULL, NULL);
+	MMIO_DH(PLANE_BUF_CFG(PIPE_B, 1), D_SKL_PLUS, NULL, NULL);
+	MMIO_DH(PLANE_BUF_CFG(PIPE_B, 2), D_SKL_PLUS, NULL, NULL);
+	MMIO_DH(PLANE_BUF_CFG(PIPE_B, 3), D_SKL_PLUS, NULL, NULL);
 
-	MMIO_DH(PLANE_BUF_CFG(PIPE_C, 0), D_SKL, NULL, NULL);
-	MMIO_DH(PLANE_BUF_CFG(PIPE_C, 1), D_SKL, NULL, NULL);
-	MMIO_DH(PLANE_BUF_CFG(PIPE_C, 2), D_SKL, NULL, NULL);
-	MMIO_DH(PLANE_BUF_CFG(PIPE_C, 3), D_SKL, NULL, NULL);
+	MMIO_DH(PLANE_BUF_CFG(PIPE_C, 0), D_SKL_PLUS, NULL, NULL);
+	MMIO_DH(PLANE_BUF_CFG(PIPE_C, 1), D_SKL_PLUS, NULL, NULL);
+	MMIO_DH(PLANE_BUF_CFG(PIPE_C, 2), D_SKL_PLUS, NULL, NULL);
+	MMIO_DH(PLANE_BUF_CFG(PIPE_C, 3), D_SKL_PLUS, NULL, NULL);
 
-	MMIO_DH(CUR_BUF_CFG(PIPE_A), D_SKL, NULL, NULL);
-	MMIO_DH(CUR_BUF_CFG(PIPE_B), D_SKL, NULL, NULL);
-	MMIO_DH(CUR_BUF_CFG(PIPE_C), D_SKL, NULL, NULL);
+	MMIO_DH(CUR_BUF_CFG(PIPE_A), D_SKL_PLUS, NULL, NULL);
+	MMIO_DH(CUR_BUF_CFG(PIPE_B), D_SKL_PLUS, NULL, NULL);
+	MMIO_DH(CUR_BUF_CFG(PIPE_C), D_SKL_PLUS, NULL, NULL);
 
-	MMIO_F(PLANE_WM(PIPE_A, 0, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
-	MMIO_F(PLANE_WM(PIPE_A, 1, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
-	MMIO_F(PLANE_WM(PIPE_A, 2, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
+	MMIO_F(PLANE_WM(PIPE_A, 0, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
+	MMIO_F(PLANE_WM(PIPE_A, 1, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
+	MMIO_F(PLANE_WM(PIPE_A, 2, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
 
-	MMIO_F(PLANE_WM(PIPE_B, 0, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
-	MMIO_F(PLANE_WM(PIPE_B, 1, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
-	MMIO_F(PLANE_WM(PIPE_B, 2, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
+	MMIO_F(PLANE_WM(PIPE_B, 0, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
+	MMIO_F(PLANE_WM(PIPE_B, 1, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
+	MMIO_F(PLANE_WM(PIPE_B, 2, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
 
-	MMIO_F(PLANE_WM(PIPE_C, 0, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
-	MMIO_F(PLANE_WM(PIPE_C, 1, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
-	MMIO_F(PLANE_WM(PIPE_C, 2, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
+	MMIO_F(PLANE_WM(PIPE_C, 0, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
+	MMIO_F(PLANE_WM(PIPE_C, 1, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
+	MMIO_F(PLANE_WM(PIPE_C, 2, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
 
-	MMIO_F(CUR_WM(PIPE_A, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
-	MMIO_F(CUR_WM(PIPE_B, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
-	MMIO_F(CUR_WM(PIPE_C, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
+	MMIO_F(CUR_WM(PIPE_A, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
+	MMIO_F(CUR_WM(PIPE_B, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
+	MMIO_F(CUR_WM(PIPE_C, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
 
-	MMIO_DH(PLANE_WM_TRANS(PIPE_A, 0), D_SKL, NULL, NULL);
-	MMIO_DH(PLANE_WM_TRANS(PIPE_A, 1), D_SKL, NULL, NULL);
-	MMIO_DH(PLANE_WM_TRANS(PIPE_A, 2), D_SKL, NULL, NULL);
+	MMIO_DH(PLANE_WM_TRANS(PIPE_A, 0), D_SKL_PLUS, NULL, NULL);
+	MMIO_DH(PLANE_WM_TRANS(PIPE_A, 1), D_SKL_PLUS, NULL, NULL);
+	MMIO_DH(PLANE_WM_TRANS(PIPE_A, 2), D_SKL_PLUS, NULL, NULL);
 
-	MMIO_DH(PLANE_WM_TRANS(PIPE_B, 0), D_SKL, NULL, NULL);
-	MMIO_DH(PLANE_WM_TRANS(PIPE_B, 1), D_SKL, NULL, NULL);
-	MMIO_DH(PLANE_WM_TRANS(PIPE_B, 2), D_SKL, NULL, NULL);
+	MMIO_DH(PLANE_WM_TRANS(PIPE_B, 0), D_SKL_PLUS, NULL, NULL);
+	MMIO_DH(PLANE_WM_TRANS(PIPE_B, 1), D_SKL_PLUS, NULL, NULL);
+	MMIO_DH(PLANE_WM_TRANS(PIPE_B, 2), D_SKL_PLUS, NULL, NULL);
 
-	MMIO_DH(PLANE_WM_TRANS(PIPE_C, 0), D_SKL, NULL, NULL);
-	MMIO_DH(PLANE_WM_TRANS(PIPE_C, 1), D_SKL, NULL, NULL);
-	MMIO_DH(PLANE_WM_TRANS(PIPE_C, 2), D_SKL, NULL, NULL);
+	MMIO_DH(PLANE_WM_TRANS(PIPE_C, 0), D_SKL_PLUS, NULL, NULL);
+	MMIO_DH(PLANE_WM_TRANS(PIPE_C, 1), D_SKL_PLUS, NULL, NULL);
+	MMIO_DH(PLANE_WM_TRANS(PIPE_C, 2), D_SKL_PLUS, NULL, NULL);
 
-	MMIO_DH(CUR_WM_TRANS(PIPE_A), D_SKL, NULL, NULL);
-	MMIO_DH(CUR_WM_TRANS(PIPE_B), D_SKL, NULL, NULL);
-	MMIO_DH(CUR_WM_TRANS(PIPE_C), D_SKL, NULL, NULL);
+	MMIO_DH(CUR_WM_TRANS(PIPE_A), D_SKL_PLUS, NULL, NULL);
+	MMIO_DH(CUR_WM_TRANS(PIPE_B), D_SKL_PLUS, NULL, NULL);
+	MMIO_DH(CUR_WM_TRANS(PIPE_C), D_SKL_PLUS, NULL, NULL);
 
-	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 0), D_SKL, NULL, NULL);
-	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 1), D_SKL, NULL, NULL);
-	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 2), D_SKL, NULL, NULL);
-	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 3), D_SKL, NULL, NULL);
+	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 0), D_SKL_PLUS, NULL, NULL);
+	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 1), D_SKL_PLUS, NULL, NULL);
+	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 2), D_SKL_PLUS, NULL, NULL);
+	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 3), D_SKL_PLUS, NULL, NULL);
 
-	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 0), D_SKL, NULL, NULL);
-	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 1), D_SKL, NULL, NULL);
-	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 2), D_SKL, NULL, NULL);
-	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 3), D_SKL, NULL, NULL);
+	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 0), D_SKL_PLUS, NULL, NULL);
+	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 1), D_SKL_PLUS, NULL, NULL);
+	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 2), D_SKL_PLUS, NULL, NULL);
+	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 3), D_SKL_PLUS, NULL, NULL);
 
-	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 0), D_SKL, NULL, NULL);
-	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 1), D_SKL, NULL, NULL);
-	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 2), D_SKL, NULL, NULL);
-	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 3), D_SKL, NULL, NULL);
+	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 0), D_SKL_PLUS, NULL, NULL);
+	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 1), D_SKL_PLUS, NULL, NULL);
+	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 2), D_SKL_PLUS, NULL, NULL);
+	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 3), D_SKL_PLUS, NULL, NULL);
 
-	MMIO_DH(_REG_701C0(PIPE_A, 1), D_SKL, NULL, NULL);
-	MMIO_DH(_REG_701C0(PIPE_A, 2), D_SKL, NULL, NULL);
-	MMIO_DH(_REG_701C0(PIPE_A, 3), D_SKL, NULL, NULL);
-	MMIO_DH(_REG_701C0(PIPE_A, 4), D_SKL, NULL, NULL);
+	MMIO_DH(_REG_701C0(PIPE_A, 1), D_SKL_PLUS, NULL, NULL);
+	MMIO_DH(_REG_701C0(PIPE_A, 2), D_SKL_PLUS, NULL, NULL);
+	MMIO_DH(_REG_701C0(PIPE_A, 3), D_SKL_PLUS, NULL, NULL);
+	MMIO_DH(_REG_701C0(PIPE_A, 4), D_SKL_PLUS, NULL, NULL);
 
-	MMIO_DH(_REG_701C0(PIPE_B, 1), D_SKL, NULL, NULL);
-	MMIO_DH(_REG_701C0(PIPE_B, 2), D_SKL, NULL, NULL);
-	MMIO_DH(_REG_701C0(PIPE_B, 3), D_SKL, NULL, NULL);
-	MMIO_DH(_REG_701C0(PIPE_B, 4), D_SKL, NULL, NULL);
+	MMIO_DH(_REG_701C0(PIPE_B, 1), D_SKL_PLUS, NULL, NULL);
+	MMIO_DH(_REG_701C0(PIPE_B, 2), D_SKL_PLUS, NULL, NULL);
+	MMIO_DH(_REG_701C0(PIPE_B, 3), D_SKL_PLUS, NULL, NULL);
+	MMIO_DH(_REG_701C0(PIPE_B, 4), D_SKL_PLUS, NULL, NULL);
 
-	MMIO_DH(_REG_701C0(PIPE_C, 1), D_SKL, NULL, NULL);
-	MMIO_DH(_REG_701C0(PIPE_C, 2), D_SKL, NULL, NULL);
-	MMIO_DH(_REG_701C0(PIPE_C, 3), D_SKL, NULL, NULL);
-	MMIO_DH(_REG_701C0(PIPE_C, 4), D_SKL, NULL, NULL);
+	MMIO_DH(_REG_701C0(PIPE_C, 1), D_SKL_PLUS, NULL, NULL);
+	MMIO_DH(_REG_701C0(PIPE_C, 2), D_SKL_PLUS, NULL, NULL);
+	MMIO_DH(_REG_701C0(PIPE_C, 3), D_SKL_PLUS, NULL, NULL);
+	MMIO_DH(_REG_701C0(PIPE_C, 4), D_SKL_PLUS, NULL, NULL);
 
-	MMIO_DH(_REG_701C4(PIPE_A, 1), D_SKL, NULL, NULL);
-	MMIO_DH(_REG_701C4(PIPE_A, 2), D_SKL, NULL, NULL);
-	MMIO_DH(_REG_701C4(PIPE_A, 3), D_SKL, NULL, NULL);
-	MMIO_DH(_REG_701C4(PIPE_A, 4), D_SKL, NULL, NULL);
+	MMIO_DH(_REG_701C4(PIPE_A, 1), D_SKL_PLUS, NULL, NULL);
+	MMIO_DH(_REG_701C4(PIPE_A, 2), D_SKL_PLUS, NULL, NULL);
+	MMIO_DH(_REG_701C4(PIPE_A, 3), D_SKL_PLUS, NULL, NULL);
+	MMIO_DH(_REG_701C4(PIPE_A, 4), D_SKL_PLUS, NULL, NULL);
 
-	MMIO_DH(_REG_701C4(PIPE_B, 1), D_SKL, NULL, NULL);
-	MMIO_DH(_REG_701C4(PIPE_B, 2), D_SKL, NULL, NULL);
-	MMIO_DH(_REG_701C4(PIPE_B, 3), D_SKL, NULL, NULL);
-	MMIO_DH(_REG_701C4(PIPE_B, 4), D_SKL, NULL, NULL);
+	MMIO_DH(_REG_701C4(PIPE_B, 1), D_SKL_PLUS, NULL, NULL);
+	MMIO_DH(_REG_701C4(PIPE_B, 2), D_SKL_PLUS, NULL, NULL);
+	MMIO_DH(_REG_701C4(PIPE_B, 3), D_SKL_PLUS, NULL, NULL);
+	MMIO_DH(_REG_701C4(PIPE_B, 4), D_SKL_PLUS, NULL, NULL);
 
-	MMIO_DH(_REG_701C4(PIPE_C, 1), D_SKL, NULL, NULL);
-	MMIO_DH(_REG_701C4(PIPE_C, 2), D_SKL, NULL, NULL);
-	MMIO_DH(_REG_701C4(PIPE_C, 3), D_SKL, NULL, NULL);
-	MMIO_DH(_REG_701C4(PIPE_C, 4), D_SKL, NULL, NULL);
+	MMIO_DH(_REG_701C4(PIPE_C, 1), D_SKL_PLUS, NULL, NULL);
+	MMIO_DH(_REG_701C4(PIPE_C, 2), D_SKL_PLUS, NULL, NULL);
+	MMIO_DH(_REG_701C4(PIPE_C, 3), D_SKL_PLUS, NULL, NULL);
+	MMIO_DH(_REG_701C4(PIPE_C, 4), D_SKL_PLUS, NULL, NULL);
 
-	MMIO_D(0x70380, D_SKL);
-	MMIO_D(0x71380, D_SKL);
-	MMIO_D(0x72380, D_SKL);
-	MMIO_D(0x7039c, D_SKL);
+	MMIO_D(0x70380, D_SKL_PLUS);
+	MMIO_D(0x71380, D_SKL_PLUS);
+	MMIO_D(0x72380, D_SKL_PLUS);
+	MMIO_D(0x7039c, D_SKL_PLUS);
 
-	MMIO_F(0x80000, 0x3000, 0, 0, 0, D_SKL, NULL, NULL);
-	MMIO_D(0x8f074, D_SKL);
-	MMIO_D(0x8f004, D_SKL);
-	MMIO_D(0x8f034, D_SKL);
+	MMIO_F(0x80000, 0x3000, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
+	MMIO_D(0x8f074, D_SKL | D_KBL);
+	MMIO_D(0x8f004, D_SKL | D_KBL);
+	MMIO_D(0x8f034, D_SKL | D_KBL);
 
-	MMIO_D(0xb11c, D_SKL);
+	MMIO_D(0xb11c, D_SKL | D_KBL);
 
-	MMIO_D(0x51000, D_SKL);
-	MMIO_D(0x6c00c, D_SKL);
+	MMIO_D(0x51000, D_SKL | D_KBL);
+	MMIO_D(0x6c00c, D_SKL_PLUS);
 
-	MMIO_F(0xc800, 0x7f8, F_CMD_ACCESS, 0, 0, D_SKL, NULL, NULL);
-	MMIO_F(0xb020, 0x80, F_CMD_ACCESS, 0, 0, D_SKL, NULL, NULL);
+	MMIO_F(0xc800, 0x7f8, F_CMD_ACCESS, 0, 0, D_SKL | D_KBL, NULL, NULL);
+	MMIO_F(0xb020, 0x80, F_CMD_ACCESS, 0, 0, D_SKL | D_KBL, NULL, NULL);
 
-	MMIO_D(0xd08, D_SKL);
-	MMIO_DFH(0x20e0, D_SKL, F_MODE_MASK, NULL, NULL);
-	MMIO_DFH(0x20ec, D_SKL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+	MMIO_D(0xd08, D_SKL_PLUS);
+	MMIO_DFH(0x20e0, D_SKL_PLUS, F_MODE_MASK, NULL, NULL);
+	MMIO_DFH(0x20ec, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
 
 	/* TRTT */
-	MMIO_DFH(0x4de0, D_SKL, F_CMD_ACCESS, NULL, NULL);
-	MMIO_DFH(0x4de4, D_SKL, F_CMD_ACCESS, NULL, NULL);
-	MMIO_DFH(0x4de8, D_SKL, F_CMD_ACCESS, NULL, NULL);
-	MMIO_DFH(0x4dec, D_SKL, F_CMD_ACCESS, NULL, NULL);
-	MMIO_DFH(0x4df0, D_SKL, F_CMD_ACCESS, NULL, NULL);
-	MMIO_DFH(0x4df4, D_SKL, F_CMD_ACCESS, NULL, gen9_trtte_write);
-	MMIO_DH(0x4dfc, D_SKL, NULL, gen9_trtt_chicken_write);
+	MMIO_DFH(0x4de0, D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(0x4de4, D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(0x4de8, D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(0x4dec, D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(0x4df0, D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(0x4df4, D_SKL | D_KBL, F_CMD_ACCESS, NULL, gen9_trtte_write);
+	MMIO_DH(0x4dfc, D_SKL | D_KBL, NULL, gen9_trtt_chicken_write);
 
-	MMIO_D(0x45008, D_SKL);
+	MMIO_D(0x45008, D_SKL | D_KBL);
 
-	MMIO_D(0x46430, D_SKL);
+	MMIO_D(0x46430, D_SKL | D_KBL);
 
-	MMIO_D(0x46520, D_SKL);
+	MMIO_D(0x46520, D_SKL | D_KBL);
 
-	MMIO_D(0xc403c, D_SKL);
-	MMIO_D(0xb004, D_SKL);
+	MMIO_D(0xc403c, D_SKL | D_KBL);
+	MMIO_D(0xb004, D_SKL_PLUS);
 	MMIO_DH(DMA_CTRL, D_SKL_PLUS, NULL, dma_ctrl_write);
 
-	MMIO_D(0x65900, D_SKL);
-	MMIO_D(0x1082c0, D_SKL);
-	MMIO_D(0x4068, D_SKL);
-	MMIO_D(0x67054, D_SKL);
-	MMIO_D(0x6e560, D_SKL);
-	MMIO_D(0x6e554, D_SKL);
-	MMIO_D(0x2b20, D_SKL);
-	MMIO_D(0x65f00, D_SKL);
-	MMIO_D(0x65f08, D_SKL);
-	MMIO_D(0x320f0, D_SKL);
+	MMIO_D(0x65900, D_SKL_PLUS);
+	MMIO_D(0x1082c0, D_SKL | D_KBL);
+	MMIO_D(0x4068, D_SKL | D_KBL);
+	MMIO_D(0x67054, D_SKL | D_KBL);
+	MMIO_D(0x6e560, D_SKL | D_KBL);
+	MMIO_D(0x6e554, D_SKL | D_KBL);
+	MMIO_D(0x2b20, D_SKL | D_KBL);
+	MMIO_D(0x65f00, D_SKL | D_KBL);
+	MMIO_D(0x65f08, D_SKL | D_KBL);
+	MMIO_D(0x320f0, D_SKL | D_KBL);
 
-	MMIO_DFH(_REG_VCS2_EXCC, D_SKL, F_CMD_ACCESS, NULL, NULL);
-	MMIO_D(0x70034, D_SKL);
-	MMIO_D(0x71034, D_SKL);
-	MMIO_D(0x72034, D_SKL);
+	MMIO_DFH(_REG_VCS2_EXCC, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(_REG_VECS_EXCC, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
+	MMIO_D(0x70034, D_SKL_PLUS);
+	MMIO_D(0x71034, D_SKL_PLUS);
+	MMIO_D(0x72034, D_SKL_PLUS);
 
-	MMIO_D(_PLANE_KEYVAL_1(PIPE_A), D_SKL);
-	MMIO_D(_PLANE_KEYVAL_1(PIPE_B), D_SKL);
-	MMIO_D(_PLANE_KEYVAL_1(PIPE_C), D_SKL);
-	MMIO_D(_PLANE_KEYMSK_1(PIPE_A), D_SKL);
-	MMIO_D(_PLANE_KEYMSK_1(PIPE_B), D_SKL);
-	MMIO_D(_PLANE_KEYMSK_1(PIPE_C), D_SKL);
+	MMIO_D(_PLANE_KEYVAL_1(PIPE_A), D_SKL_PLUS);
+	MMIO_D(_PLANE_KEYVAL_1(PIPE_B), D_SKL_PLUS);
+	MMIO_D(_PLANE_KEYVAL_1(PIPE_C), D_SKL_PLUS);
+	MMIO_D(_PLANE_KEYMSK_1(PIPE_A), D_SKL_PLUS);
+	MMIO_D(_PLANE_KEYMSK_1(PIPE_B), D_SKL_PLUS);
+	MMIO_D(_PLANE_KEYMSK_1(PIPE_C), D_SKL_PLUS);
 
-	MMIO_D(0x44500, D_SKL);
+	MMIO_D(0x44500, D_SKL_PLUS);
 	MMIO_DFH(GEN9_CSFE_CHICKEN1_RCS, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
-	MMIO_DFH(GEN8_HDC_CHICKEN1, D_SKL, F_MODE_MASK | F_CMD_ACCESS,
+	MMIO_DFH(GEN8_HDC_CHICKEN1, D_SKL | D_KBL, F_MODE_MASK | F_CMD_ACCESS,
 		NULL, NULL);
+
+	MMIO_D(0x4ab8, D_KBL);
+	MMIO_D(0x940c, D_SKL_PLUS);
+	MMIO_D(0x2248, D_SKL_PLUS | D_KBL);
+	MMIO_D(0x4ab0, D_SKL | D_KBL);
+	MMIO_D(0x20d4, D_SKL | D_KBL);
+
 	return 0;
 }
 
@@ -2873,7 +2901,8 @@ int intel_gvt_setup_mmio_info(struct intel_gvt *gvt)
 		ret = init_broadwell_mmio_info(gvt);
 		if (ret)
 			goto err;
-	} else if (IS_SKYLAKE(dev_priv)) {
+	} else if (IS_SKYLAKE(dev_priv)
+		|| IS_KABYLAKE(dev_priv)) {
 		ret = init_broadwell_mmio_info(gvt);
 		if (ret)
 			goto err;
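The handlers.c churn above is mechanical: every Skylake-only entry is widened so the same virtual MMIO handler also fires on Kabylake, either via D_SKL_PLUS or an explicit D_SKL | D_KBL mask. For orientation, the registration macros in that file follow roughly the shape below; the expansion is a sketch assumed for illustration, not quoted from the tree:

	/* Sketch: each entry records a register, its byte size, access
	 * flags, optional read/write handlers and, crucially here, a
	 * device bitmask naming the platforms the entry applies to. */
	#define MMIO_F(reg, s, f, am, rm, d, r, w) do { \
		ret = new_mmio_info(gvt, i915_mmio_reg_offset(reg), \
				    f, s, am, rm, d, r, w); \
		if (ret) \
			return ret; \
	} while (0)

	#define MMIO_D(reg, d)            MMIO_F(reg, 4, 0, 0, 0, d, NULL, NULL)
	#define MMIO_DH(reg, d, r, w)     MMIO_F(reg, 4, 0, 0, 0, d, r, w)
	#define MMIO_DFH(reg, d, f, r, w) MMIO_F(reg, 4, f, 0, 0, d, r, w)

An entry whose mask lacks D_KBL is simply never registered on a Kabylake GPU, which is why Kabylake enabling has to touch every D_SKL line.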
diff --git a/drivers/gpu/drm/i915/gvt/interrupt.c b/drivers/gpu/drm/i915/gvt/interrupt.c
index 92bb247e3478..9d6812f0957f 100644
--- a/drivers/gpu/drm/i915/gvt/interrupt.c
+++ b/drivers/gpu/drm/i915/gvt/interrupt.c
@@ -580,7 +580,7 @@ static void gen8_init_irq(
 
 		SET_BIT_INFO(irq, 4, PRIMARY_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C);
 		SET_BIT_INFO(irq, 5, SPRITE_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C);
-	} else if (IS_SKYLAKE(gvt->dev_priv)) {
+	} else if (IS_SKYLAKE(gvt->dev_priv) || IS_KABYLAKE(gvt->dev_priv)) {
 		SET_BIT_INFO(irq, 25, AUX_CHANNEL_B, INTEL_GVT_IRQ_INFO_DE_PORT);
 		SET_BIT_INFO(irq, 26, AUX_CHANNEL_C, INTEL_GVT_IRQ_INFO_DE_PORT);
 		SET_BIT_INFO(irq, 27, AUX_CHANNEL_D, INTEL_GVT_IRQ_INFO_DE_PORT);
@@ -690,7 +690,8 @@ int intel_gvt_init_irq(struct intel_gvt *gvt)
 
 	gvt_dbg_core("init irq framework\n");
 
-	if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)) {
+	if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)
+		|| IS_KABYLAKE(gvt->dev_priv)) {
 		irq->ops = &gen8_irq_ops;
 		irq->irq_map = gen8_irq_map;
 	} else {
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index 1ea3eb270de8..1ae0b4083ce1 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -295,10 +295,12 @@ static ssize_t description_show(struct kobject *kobj, struct device *dev,
 		return 0;
 
 	return sprintf(buf, "low_gm_size: %dMB\nhigh_gm_size: %dMB\n"
-		       "fence: %d\nresolution: %s\n",
+		       "fence: %d\nresolution: %s\n"
+		       "weight: %d\n",
 		       BYTES_TO_MB(type->low_gm_size),
 		       BYTES_TO_MB(type->high_gm_size),
-		       type->fence, vgpu_edid_str(type->resolution));
+		       type->fence, vgpu_edid_str(type->resolution),
+		       type->weight);
 }
 
 static MDEV_TYPE_ATTR_RO(available_instances);
@@ -544,6 +546,8 @@ static int intel_vgpu_open(struct mdev_device *mdev)
 	if (ret)
 		goto undo_group;
 
+	intel_gvt_ops->vgpu_activate(vgpu);
+
 	atomic_set(&vgpu->vdev.released, 0);
 	return ret;
 
@@ -569,6 +573,8 @@ static void __intel_vgpu_release(struct intel_vgpu *vgpu)
 	if (atomic_cmpxchg(&vgpu->vdev.released, 0, 1))
 		return;
 
+	intel_gvt_ops->vgpu_deactivate(vgpu);
+
 	ret = vfio_unregister_notifier(mdev_dev(vgpu->vdev.mdev), VFIO_IOMMU_NOTIFY,
 					&vgpu->vdev.iommu_notifier);
 	WARN(ret, "vfio_unregister_notifier for iommu failed: %d\n", ret);
@@ -1146,8 +1152,40 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
 	return 0;
 }
 
+static ssize_t
+vgpu_id_show(struct device *dev, struct device_attribute *attr,
+	     char *buf)
+{
+	struct mdev_device *mdev = mdev_from_dev(dev);
+
+	if (mdev) {
+		struct intel_vgpu *vgpu = (struct intel_vgpu *)
+			mdev_get_drvdata(mdev);
+		return sprintf(buf, "%d\n", vgpu->id);
+	}
+	return sprintf(buf, "\n");
+}
+
+static DEVICE_ATTR_RO(vgpu_id);
+
+static struct attribute *intel_vgpu_attrs[] = {
+	&dev_attr_vgpu_id.attr,
+	NULL
+};
+
+static const struct attribute_group intel_vgpu_group = {
+	.name = "intel_vgpu",
+	.attrs = intel_vgpu_attrs,
+};
+
+static const struct attribute_group *intel_vgpu_groups[] = {
+	&intel_vgpu_group,
+	NULL,
+};
+
 static const struct mdev_parent_ops intel_vgpu_ops = {
 	.supported_type_groups	= intel_vgpu_type_groups,
+	.mdev_attr_groups	= intel_vgpu_groups,
 	.create			= intel_vgpu_create,
 	.remove			= intel_vgpu_remove,
 
@@ -1326,6 +1364,7 @@ static int kvmgt_guest_init(struct mdev_device *mdev)
 	vgpu->handle = (unsigned long)info;
 	info->vgpu = vgpu;
 	info->kvm = kvm;
+	kvm_get_kvm(info->kvm);
 
 	kvmgt_protect_table_init(info);
 	gvt_cache_init(vgpu);
@@ -1339,14 +1378,8 @@ static int kvmgt_guest_init(struct mdev_device *mdev)
 
 static bool kvmgt_guest_exit(struct kvmgt_guest_info *info)
 {
-	struct intel_vgpu *vgpu = info->vgpu;
-
-	if (!info) {
-		gvt_vgpu_err("kvmgt_guest_info invalid\n");
-		return false;
-	}
-
 	kvm_page_track_unregister_notifier(info->kvm, &info->track_node);
+	kvm_put_kvm(info->kvm);
 	kvmgt_protect_table_destroy(info);
 	gvt_cache_destroy(info->vgpu);
 	vfree(info);
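The new "intel_vgpu" attribute group exposes a read-only vgpu_id file on every mdev instance. Assuming the usual mdev sysfs layout (the path below is inferred from the group name, not spelled out in this patch), a minimal userspace reader looks like:

	#include <stdio.h>

	int main(int argc, char **argv)
	{
		char path[256], buf[16];
		FILE *f;

		if (argc < 2)		/* argv[1]: UUID of the mdev instance */
			return 1;
		snprintf(path, sizeof(path),
			 "/sys/bus/mdev/devices/%s/intel_vgpu/vgpu_id", argv[1]);
		f = fopen(path, "r");
		if (!f)
			return 1;
		if (fgets(buf, sizeof(buf), f))
			printf("vgpu id: %s", buf);	/* vgpu->id, per vgpu_id_show() */
		fclose(f);
		return 0;
	}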
diff --git a/drivers/gpu/drm/i915/gvt/mmio.h b/drivers/gpu/drm/i915/gvt/mmio.h
index a3a027025cd0..7edd66f38ef9 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.h
+++ b/drivers/gpu/drm/i915/gvt/mmio.h
@@ -44,20 +44,21 @@ struct intel_vgpu;
 #define D_HSW	(1 << 2)
 #define D_BDW	(1 << 3)
 #define D_SKL	(1 << 4)
+#define D_KBL	(1 << 5)
 
-#define D_GEN9PLUS	(D_SKL)
-#define D_GEN8PLUS	(D_BDW | D_SKL)
-#define D_GEN75PLUS	(D_HSW | D_BDW | D_SKL)
-#define D_GEN7PLUS	(D_IVB | D_HSW | D_BDW | D_SKL)
+#define D_GEN9PLUS	(D_SKL | D_KBL)
+#define D_GEN8PLUS	(D_BDW | D_SKL | D_KBL)
+#define D_GEN75PLUS	(D_HSW | D_BDW | D_SKL | D_KBL)
+#define D_GEN7PLUS	(D_IVB | D_HSW | D_BDW | D_SKL | D_KBL)
 
-#define D_SKL_PLUS	(D_SKL)
-#define D_BDW_PLUS	(D_BDW | D_SKL)
-#define D_HSW_PLUS	(D_HSW | D_BDW | D_SKL)
-#define D_IVB_PLUS	(D_IVB | D_HSW | D_BDW | D_SKL)
+#define D_SKL_PLUS	(D_SKL | D_KBL)
+#define D_BDW_PLUS	(D_BDW | D_SKL | D_KBL)
+#define D_HSW_PLUS	(D_HSW | D_BDW | D_SKL | D_KBL)
+#define D_IVB_PLUS	(D_IVB | D_HSW | D_BDW | D_SKL | D_KBL)
 
 #define D_PRE_BDW	(D_SNB | D_IVB | D_HSW)
 #define D_PRE_SKL	(D_SNB | D_IVB | D_HSW | D_BDW)
-#define D_ALL	(D_SNB | D_IVB | D_HSW | D_BDW | D_SKL)
+#define D_ALL	(D_SNB | D_IVB | D_HSW | D_BDW | D_SKL | D_KBL)
 
 struct intel_gvt_mmio_info {
 	u32 offset;
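With D_KBL defined as bit 5, D_SKL_PLUS and D_SKL | D_KBL evaluate to the same mask (0x30) after this patch; the two spellings differ only in intent, "Skylake and everything newer" versus "exactly these two platforms", which starts to matter once another D_* bit is added. A standalone check of the bit arithmetic, with the macros copied from the hunk above:

	#include <assert.h>

	#define D_SKL		(1 << 4)
	#define D_KBL		(1 << 5)
	#define D_SKL_PLUS	(D_SKL | D_KBL)

	int main(void)
	{
		assert(D_SKL_PLUS == 0x30);		/* bits 4 and 5 set */
		assert(D_SKL_PLUS == (D_SKL | D_KBL));	/* identical today */
		assert(!(D_SKL & D_KBL));		/* platforms stay distinct */
		return 0;
	}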
diff --git a/drivers/gpu/drm/i915/gvt/render.c b/drivers/gpu/drm/i915/gvt/render.c
index 95ee091ce085..c6e7972ac21d 100644
--- a/drivers/gpu/drm/i915/gvt/render.c
+++ b/drivers/gpu/drm/i915/gvt/render.c
@@ -44,7 +44,7 @@ struct render_mmio {
 	u32 value;
 };
 
-static struct render_mmio gen8_render_mmio_list[] = {
+static struct render_mmio gen8_render_mmio_list[] __cacheline_aligned = {
 	{RCS, _MMIO(0x229c), 0xffff, false},
 	{RCS, _MMIO(0x2248), 0x0, false},
 	{RCS, _MMIO(0x2098), 0x0, false},
@@ -75,7 +75,7 @@ static struct render_mmio gen8_render_mmio_list[] = {
 	{BCS, _MMIO(0x22028), 0x0, false},
 };
 
-static struct render_mmio gen9_render_mmio_list[] = {
+static struct render_mmio gen9_render_mmio_list[] __cacheline_aligned = {
 	{RCS, _MMIO(0x229c), 0xffff, false},
 	{RCS, _MMIO(0x2248), 0x0, false},
 	{RCS, _MMIO(0x2098), 0x0, false},
@@ -126,6 +126,18 @@ static struct render_mmio gen9_render_mmio_list[] = {
 	{VCS2, _MMIO(0x1c028), 0xffff, false},
 
 	{VECS, _MMIO(0x1a028), 0xffff, false},
+
+	{RCS, _MMIO(0x7304), 0xffff, true},
+	{RCS, _MMIO(0x2248), 0x0, false},
+	{RCS, _MMIO(0x940c), 0x0, false},
+	{RCS, _MMIO(0x4ab8), 0x0, false},
+
+	{RCS, _MMIO(0x4ab0), 0x0, false},
+	{RCS, _MMIO(0x20d4), 0x0, false},
+
+	{RCS, _MMIO(0xb004), 0x0, false},
+	{RCS, _MMIO(0x20a0), 0x0, false},
+	{RCS, _MMIO(0x20e4), 0xffff, false},
 };
 
 static u32 gen9_render_mocs[I915_NUM_ENGINES][64];
@@ -159,7 +171,7 @@ static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
 	 */
 	fw = intel_uncore_forcewake_for_reg(dev_priv, reg,
 					    FW_REG_READ | FW_REG_WRITE);
-	if (ring_id == RCS && IS_SKYLAKE(dev_priv))
+	if (ring_id == RCS && (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)))
 		fw |= FORCEWAKE_RENDER;
 
 	intel_uncore_forcewake_get(dev_priv, fw);
@@ -192,9 +204,6 @@ static void load_mocs(struct intel_vgpu *vgpu, int ring_id)
 	if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
 		return;
 
-	if (!IS_SKYLAKE(dev_priv))
-		return;
-
 	offset.reg = regs[ring_id];
 	for (i = 0; i < 64; i++) {
 		gen9_render_mocs[ring_id][i] = I915_READ(offset);
@@ -207,7 +216,7 @@ static void load_mocs(struct intel_vgpu *vgpu, int ring_id)
 	l3_offset.reg = 0xb020;
 	for (i = 0; i < 32; i++) {
 		gen9_render_mocs_L3[i] = I915_READ(l3_offset);
-		I915_WRITE(l3_offset, vgpu_vreg(vgpu, offset));
+		I915_WRITE(l3_offset, vgpu_vreg(vgpu, l3_offset));
 		POSTING_READ(l3_offset);
 		l3_offset.reg += 4;
 	}
@@ -230,9 +239,6 @@ static void restore_mocs(struct intel_vgpu *vgpu, int ring_id)
 	if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
 		return;
 
-	if (!IS_SKYLAKE(dev_priv))
-		return;
-
 	offset.reg = regs[ring_id];
 	for (i = 0; i < 64; i++) {
 		vgpu_vreg(vgpu, offset) = I915_READ(offset);
@@ -265,7 +271,8 @@ void intel_gvt_load_render_mmio(struct intel_vgpu *vgpu, int ring_id)
 	u32 inhibit_mask =
 		_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
 
-	if (IS_SKYLAKE(vgpu->gvt->dev_priv)) {
+	if (IS_SKYLAKE(vgpu->gvt->dev_priv)
+		|| IS_KABYLAKE(vgpu->gvt->dev_priv)) {
 		mmio = gen9_render_mmio_list;
 		array_size = ARRAY_SIZE(gen9_render_mmio_list);
 		load_mocs(vgpu, ring_id);
@@ -312,7 +319,7 @@ void intel_gvt_restore_render_mmio(struct intel_vgpu *vgpu, int ring_id)
 	u32 v;
 	int i, array_size;
 
-	if (IS_SKYLAKE(dev_priv)) {
+	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
 		mmio = gen9_render_mmio_list;
 		array_size = ARRAY_SIZE(gen9_render_mmio_list);
 		restore_mocs(vgpu, ring_id);
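Dropping the !IS_SKYLAKE early returns means load_mocs()/restore_mocs() now run on Kabylake as well, treating the 64 per-engine MOCS control registers as part of the vGPU context. The save-and-load half reduces to the pattern below, a simplified sketch (register base passed in, the L3 block at 0xb020 omitted), not the verbatim function:

	static u32 saved_mocs[64];

	static void save_and_load_mocs(struct intel_vgpu *vgpu, u32 mocs_base)
	{
		struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
		i915_reg_t offset;
		int i;

		offset.reg = mocs_base;
		for (i = 0; i < 64; i++) {
			saved_mocs[i] = I915_READ(offset);		/* host copy */
			I915_WRITE(offset, vgpu_vreg(vgpu, offset));	/* guest copy */
			POSTING_READ(offset);
			offset.reg += 4;	/* MOCS regs are packed 4 bytes apart */
		}
	}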
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c
index 34b9acdf3479..79ba4b3440aa 100644
--- a/drivers/gpu/drm/i915/gvt/sched_policy.c
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.c
@@ -47,19 +47,92 @@ static bool vgpu_has_pending_workload(struct intel_vgpu *vgpu)
 	return false;
 }
 
+struct vgpu_sched_data {
+	struct list_head lru_list;
+	struct intel_vgpu *vgpu;
+
+	ktime_t sched_in_time;
+	ktime_t sched_out_time;
+	ktime_t sched_time;
+	ktime_t left_ts;
+	ktime_t allocated_ts;
+
+	struct vgpu_sched_ctl sched_ctl;
+};
+
+struct gvt_sched_data {
+	struct intel_gvt *gvt;
+	struct hrtimer timer;
+	unsigned long period;
+	struct list_head lru_runq_head;
+};
+
+static void vgpu_update_timeslice(struct intel_vgpu *pre_vgpu)
+{
+	ktime_t delta_ts;
+	struct vgpu_sched_data *vgpu_data = pre_vgpu->sched_data;
+
+	delta_ts = vgpu_data->sched_out_time - vgpu_data->sched_in_time;
+
+	vgpu_data->sched_time += delta_ts;
+	vgpu_data->left_ts -= delta_ts;
+}
+
+#define GVT_TS_BALANCE_PERIOD_MS 100
+#define GVT_TS_BALANCE_STAGE_NUM 10
+
+static void gvt_balance_timeslice(struct gvt_sched_data *sched_data)
+{
+	struct vgpu_sched_data *vgpu_data;
+	struct list_head *pos;
+	static uint64_t stage_check;
+	int stage = stage_check++ % GVT_TS_BALANCE_STAGE_NUM;
+
+	/* The timeslice accumulation reset at stage 0, which is
+	 * allocated again without adding previous debt.
+	 */
+	if (stage == 0) {
+		int total_weight = 0;
+		ktime_t fair_timeslice;
+
+		list_for_each(pos, &sched_data->lru_runq_head) {
+			vgpu_data = container_of(pos, struct vgpu_sched_data, lru_list);
+			total_weight += vgpu_data->sched_ctl.weight;
+		}
+
+		list_for_each(pos, &sched_data->lru_runq_head) {
+			vgpu_data = container_of(pos, struct vgpu_sched_data, lru_list);
+			fair_timeslice = ms_to_ktime(GVT_TS_BALANCE_PERIOD_MS) *
+						vgpu_data->sched_ctl.weight /
+						total_weight;
+
+			vgpu_data->allocated_ts = fair_timeslice;
+			vgpu_data->left_ts = vgpu_data->allocated_ts;
+		}
+	} else {
+		list_for_each(pos, &sched_data->lru_runq_head) {
+			vgpu_data = container_of(pos, struct vgpu_sched_data, lru_list);
+
+			/* timeslice for next 100ms should add the left/debt
+			 * slice of previous stages.
+			 */
+			vgpu_data->left_ts += vgpu_data->allocated_ts;
+		}
+	}
+}
+
 static void try_to_schedule_next_vgpu(struct intel_gvt *gvt)
 {
 	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
 	enum intel_engine_id i;
 	struct intel_engine_cs *engine;
+	struct vgpu_sched_data *vgpu_data;
+	ktime_t cur_time;
 
 	/* no target to schedule */
 	if (!scheduler->next_vgpu)
 		return;
 
-	gvt_dbg_sched("try to schedule next vgpu %d\n",
-			scheduler->next_vgpu->id);
-
 	/*
 	 * after the flag is set, workload dispatch thread will
 	 * stop dispatching workload for current vgpu
@@ -68,14 +141,18 @@ static void try_to_schedule_next_vgpu(struct intel_gvt *gvt)
 
 	/* still have uncompleted workload? */
 	for_each_engine(engine, gvt->dev_priv, i) {
-		if (scheduler->current_workload[i]) {
-			gvt_dbg_sched("still have running workload\n");
+		if (scheduler->current_workload[i])
 			return;
-		}
 	}
 
-	gvt_dbg_sched("switch to next vgpu %d\n",
-			scheduler->next_vgpu->id);
+	cur_time = ktime_get();
+	if (scheduler->current_vgpu) {
+		vgpu_data = scheduler->current_vgpu->sched_data;
+		vgpu_data->sched_out_time = cur_time;
+		vgpu_update_timeslice(scheduler->current_vgpu);
+	}
+	vgpu_data = scheduler->next_vgpu->sched_data;
+	vgpu_data->sched_in_time = cur_time;
 
 	/* switch current vgpu */
 	scheduler->current_vgpu = scheduler->next_vgpu;
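Together, gvt_balance_timeslice() and vgpu_update_timeslice() implement proportional sharing: each 100 ms window is split by weight at stage 0, and actual run time is debited from left_ts at every vGPU switch, so only vGPUs with budget remaining are eligible (see find_busy_vgpu() in the next hunk). The stage-0 arithmetic, worked through standalone for two vGPUs whose weights differ by 2x:

	#include <stdio.h>

	#define GVT_TS_BALANCE_PERIOD_MS 100

	int main(void)
	{
		int weight[] = { 8, 4 };	/* two contending vGPUs */
		int total = weight[0] + weight[1];
		int i;

		for (i = 0; i < 2; i++)
			printf("vgpu%d: %d ms of every %d ms\n", i,
			       GVT_TS_BALANCE_PERIOD_MS * weight[i] / total,
			       GVT_TS_BALANCE_PERIOD_MS);
		/* 66 ms vs 33 ms: twice the weight, twice the GPU time
		 * (integer ms here; the kernel computes in ktime units). */
		return 0;
	}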
@@ -88,97 +165,106 @@ static void try_to_schedule_next_vgpu(struct intel_gvt *gvt)
 	wake_up(&scheduler->waitq[i]);
 }
 
-struct tbs_vgpu_data {
-	struct list_head list;
-	struct intel_vgpu *vgpu;
-	/* put some per-vgpu sched stats here */
-};
-
-struct tbs_sched_data {
-	struct intel_gvt *gvt;
-	struct delayed_work work;
-	unsigned long period;
-	struct list_head runq_head;
-};
-
-#define GVT_DEFAULT_TIME_SLICE (msecs_to_jiffies(1))
-
-static void tbs_sched_func(struct work_struct *work)
+static struct intel_vgpu *find_busy_vgpu(struct gvt_sched_data *sched_data)
 {
-	struct tbs_sched_data *sched_data = container_of(work,
-			struct tbs_sched_data, work.work);
-	struct tbs_vgpu_data *vgpu_data;
-
-	struct intel_gvt *gvt = sched_data->gvt;
-	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
-
+	struct vgpu_sched_data *vgpu_data;
 	struct intel_vgpu *vgpu = NULL;
-	struct list_head *pos, *head;
-
-	mutex_lock(&gvt->lock);
-
-	/* no vgpu or has already had a target */
-	if (list_empty(&sched_data->runq_head) || scheduler->next_vgpu)
-		goto out;
-
-	if (scheduler->current_vgpu) {
-		vgpu_data = scheduler->current_vgpu->sched_data;
-		head = &vgpu_data->list;
-	} else {
-		head = &sched_data->runq_head;
-	}
+	struct list_head *head = &sched_data->lru_runq_head;
+	struct list_head *pos;
 
 	/* search a vgpu with pending workload */
 	list_for_each(pos, head) {
-		if (pos == &sched_data->runq_head)
-			continue;
 
-		vgpu_data = container_of(pos, struct tbs_vgpu_data, list);
+		vgpu_data = container_of(pos, struct vgpu_sched_data, lru_list);
 		if (!vgpu_has_pending_workload(vgpu_data->vgpu))
 			continue;
 
-		vgpu = vgpu_data->vgpu;
-		break;
+		/* Return the vGPU only if it has time slice left */
+		if (vgpu_data->left_ts > 0) {
+			vgpu = vgpu_data->vgpu;
+			break;
+		}
 	}
 
+	return vgpu;
+}
+
+/* in nanosecond */
+#define GVT_DEFAULT_TIME_SLICE 1000000
+
+static void tbs_sched_func(struct gvt_sched_data *sched_data)
+{
+	struct intel_gvt *gvt = sched_data->gvt;
+	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
+	struct vgpu_sched_data *vgpu_data;
+	struct intel_vgpu *vgpu = NULL;
+	static uint64_t timer_check;
+
+	if (!(timer_check++ % GVT_TS_BALANCE_PERIOD_MS))
+		gvt_balance_timeslice(sched_data);
+
+	/* no active vgpu or has already had a target */
+	if (list_empty(&sched_data->lru_runq_head) || scheduler->next_vgpu)
+		goto out;
+
+	vgpu = find_busy_vgpu(sched_data);
 	if (vgpu) {
 		scheduler->next_vgpu = vgpu;
-		gvt_dbg_sched("pick next vgpu %d\n", vgpu->id);
+
+		/* Move the last used vGPU to the tail of lru_list */
+		vgpu_data = vgpu->sched_data;
+		list_del_init(&vgpu_data->lru_list);
+		list_add_tail(&vgpu_data->lru_list,
+				&sched_data->lru_runq_head);
+	} else {
+		scheduler->next_vgpu = gvt->idle_vgpu;
 	}
 out:
-	if (scheduler->next_vgpu) {
-		gvt_dbg_sched("try to schedule next vgpu %d\n",
-				scheduler->next_vgpu->id);
+	if (scheduler->next_vgpu)
 		try_to_schedule_next_vgpu(gvt);
-	}
+}
 
-	/*
-	 * still have vgpu on runq
-	 * or last schedule haven't finished due to running workload
-	 */
-	if (!list_empty(&sched_data->runq_head) || scheduler->next_vgpu)
-		schedule_delayed_work(&sched_data->work, sched_data->period);
+void intel_gvt_schedule(struct intel_gvt *gvt)
+{
+	struct gvt_sched_data *sched_data = gvt->scheduler.sched_data;
 
+	mutex_lock(&gvt->lock);
+	tbs_sched_func(sched_data);
 	mutex_unlock(&gvt->lock);
 }
 
+static enum hrtimer_restart tbs_timer_fn(struct hrtimer *timer_data)
+{
+	struct gvt_sched_data *data;
+
+	data = container_of(timer_data, struct gvt_sched_data, timer);
+
+	intel_gvt_request_service(data->gvt, INTEL_GVT_REQUEST_SCHED);
+
+	hrtimer_add_expires_ns(&data->timer, data->period);
+
+	return HRTIMER_RESTART;
+}
+
 static int tbs_sched_init(struct intel_gvt *gvt)
 {
 	struct intel_gvt_workload_scheduler *scheduler =
 		&gvt->scheduler;
 
-	struct tbs_sched_data *data;
+	struct gvt_sched_data *data;
 
 	data = kzalloc(sizeof(*data), GFP_KERNEL);
 	if (!data)
 		return -ENOMEM;
 
-	INIT_LIST_HEAD(&data->runq_head);
-	INIT_DELAYED_WORK(&data->work, tbs_sched_func);
+	INIT_LIST_HEAD(&data->lru_runq_head);
+	hrtimer_init(&data->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+	data->timer.function = tbs_timer_fn;
 	data->period = GVT_DEFAULT_TIME_SLICE;
 	data->gvt = gvt;
 
 	scheduler->sched_data = data;
+
 	return 0;
 }
 
@@ -186,25 +272,28 @@ static void tbs_sched_clean(struct intel_gvt *gvt)
 {
 	struct intel_gvt_workload_scheduler *scheduler =
 		&gvt->scheduler;
-	struct tbs_sched_data *data = scheduler->sched_data;
+	struct gvt_sched_data *data = scheduler->sched_data;
+
+	hrtimer_cancel(&data->timer);
 
-	cancel_delayed_work(&data->work);
 	kfree(data);
 	scheduler->sched_data = NULL;
 }
 
 static int tbs_sched_init_vgpu(struct intel_vgpu *vgpu)
 {
-	struct tbs_vgpu_data *data;
+	struct vgpu_sched_data *data;
 
 	data = kzalloc(sizeof(*data), GFP_KERNEL);
 	if (!data)
 		return -ENOMEM;
 
+	data->sched_ctl.weight = vgpu->sched_ctl.weight;
 	data->vgpu = vgpu;
-	INIT_LIST_HEAD(&data->list);
+	INIT_LIST_HEAD(&data->lru_list);
 
 	vgpu->sched_data = data;
+
 	return 0;
 }
 
@@ -216,21 +305,24 @@ static void tbs_sched_clean_vgpu(struct intel_vgpu *vgpu)
 
 static void tbs_sched_start_schedule(struct intel_vgpu *vgpu)
 {
-	struct tbs_sched_data *sched_data = vgpu->gvt->scheduler.sched_data;
-	struct tbs_vgpu_data *vgpu_data = vgpu->sched_data;
+	struct gvt_sched_data *sched_data = vgpu->gvt->scheduler.sched_data;
+	struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
 
-	if (!list_empty(&vgpu_data->list))
+	if (!list_empty(&vgpu_data->lru_list))
 		return;
 
-	list_add_tail(&vgpu_data->list, &sched_data->runq_head);
-	schedule_delayed_work(&sched_data->work, 0);
+	list_add_tail(&vgpu_data->lru_list, &sched_data->lru_runq_head);
+
+	if (!hrtimer_active(&sched_data->timer))
+		hrtimer_start(&sched_data->timer, ktime_add_ns(ktime_get(),
+			sched_data->period), HRTIMER_MODE_ABS);
 }
 
 static void tbs_sched_stop_schedule(struct intel_vgpu *vgpu)
 {
-	struct tbs_vgpu_data *vgpu_data = vgpu->sched_data;
+	struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
 
-	list_del_init(&vgpu_data->list);
+	list_del_init(&vgpu_data->lru_list);
 }
 
 static struct intel_gvt_sched_policy_ops tbs_schedule_ops = {
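The move from delayed work to an hrtimer also changes where decisions are made: tbs_timer_fn() fires every GVT_DEFAULT_TIME_SLICE (1 ms), does nothing in interrupt context except post INTEL_GVT_REQUEST_SCHED and re-arm itself, and the actual vGPU pick happens when intel_gvt_schedule() runs later in process context. The consumer side is not part of this hunk; a plausible sketch of it, under the assumption that a GVT service thread waits on the request bits:

	static int gvt_service_thread(void *priv)
	{
		struct intel_gvt *gvt = priv;

		while (!kthread_should_stop()) {
			wait_event_interruptible(gvt->service_thread_wq,
						 kthread_should_stop() ||
						 gvt->service_request);

			if (test_and_clear_bit(INTEL_GVT_REQUEST_SCHED,
					       (void *)&gvt->service_request))
				intel_gvt_schedule(gvt);
		}
		return 0;
	}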
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.h b/drivers/gpu/drm/i915/gvt/sched_policy.h
index bb8b9097e41a..ba00a5f7455f 100644
--- a/drivers/gpu/drm/i915/gvt/sched_policy.h
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.h
@@ -43,6 +43,8 @@ struct intel_gvt_sched_policy_ops {
 	void (*stop_schedule)(struct intel_vgpu *vgpu);
 };
 
+void intel_gvt_schedule(struct intel_gvt *gvt);
+
 int intel_gvt_init_sched_policy(struct intel_gvt *gvt);
 
 void intel_gvt_clean_sched_policy(struct intel_gvt *gvt);
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index ad8876bd15b3..bada32b33237 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -127,6 +127,11 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
 	return 0;
 }
 
+static inline bool is_gvt_request(struct drm_i915_gem_request *req)
+{
+	return i915_gem_context_force_single_submission(req->ctx);
+}
+
 static int shadow_context_status_change(struct notifier_block *nb,
 		unsigned long action, void *data)
 {
@@ -137,7 +142,7 @@ static int shadow_context_status_change(struct notifier_block *nb,
 	struct intel_vgpu_workload *workload =
 		scheduler->current_workload[req->engine->id];
 
-	if (unlikely(!workload))
+	if (!is_gvt_request(req) || unlikely(!workload))
 		return NOTIFY_OK;
 
 	switch (action) {
@@ -274,11 +279,8 @@ static struct intel_vgpu_workload *pick_next_workload(
 		goto out;
 	}
 
-	if (list_empty(workload_q_head(scheduler->current_vgpu, ring_id))) {
-		gvt_dbg_sched("ring id %d stop - no available workload\n",
-				ring_id);
+	if (list_empty(workload_q_head(scheduler->current_vgpu, ring_id)))
 		goto out;
-	}
 
 	/*
 	 * still have current workload, maybe the workload disptacher
@@ -448,7 +450,8 @@ static int workload_thread(void *priv)
 	struct intel_vgpu_workload *workload = NULL;
 	struct intel_vgpu *vgpu = NULL;
 	int ret;
-	bool need_force_wake = IS_SKYLAKE(gvt->dev_priv);
+	bool need_force_wake = IS_SKYLAKE(gvt->dev_priv)
+		|| IS_KABYLAKE(gvt->dev_priv);
 	DEFINE_WAIT_FUNC(wait, woken_wake_function);
 
 	kfree(p);
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h
index 2833dfa8c9ae..2cd725c0573e 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.h
+++ b/drivers/gpu/drm/i915/gvt/scheduler.h
@@ -67,7 +67,6 @@ struct shadow_per_ctx {
 };
 
 struct intel_shadow_wa_ctx {
-	struct intel_vgpu_workload *workload;
 	struct shadow_indirect_ctx indirect_ctx;
 	struct shadow_per_ctx per_ctx;
 
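Removing the workload back-pointer from intel_shadow_wa_ctx works because the struct is embedded in the workload: the owner can always be recovered with the container_of idiom instead of storing a redundant pointer. A sketch, with the embedding field name assumed:

	static inline struct intel_vgpu_workload *
	wa_ctx_to_workload(struct intel_shadow_wa_ctx *wa_ctx)
	{
		return container_of(wa_ctx, struct intel_vgpu_workload, wa_ctx);
	}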
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index 41cfa5ccae84..6e3cbd8caec2 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -64,18 +64,28 @@ void populate_pvinfo_page(struct intel_vgpu *vgpu)
 	WARN_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE);
 }
 
+#define VGPU_MAX_WEIGHT 16
+#define VGPU_WEIGHT(vgpu_num)	\
+	(VGPU_MAX_WEIGHT / (vgpu_num))
+
 static struct {
 	unsigned int low_mm;
 	unsigned int high_mm;
 	unsigned int fence;
+
+	/* A vGPU with a weight of 8 will get twice as much GPU as a vGPU
+	 * with a weight of 4 on a contended host, different vGPU type has
+	 * different weight set. Legal weights range from 1 to 16.
+	 */
+	unsigned int weight;
 	enum intel_vgpu_edid edid;
 	char *name;
 } vgpu_types[] = {
 /* Fixed vGPU type table */
-	{ MB_TO_BYTES(64), MB_TO_BYTES(512), 4, GVT_EDID_1024_768, "8" },
-	{ MB_TO_BYTES(128), MB_TO_BYTES(512), 4, GVT_EDID_1920_1200, "4" },
-	{ MB_TO_BYTES(256), MB_TO_BYTES(1024), 4, GVT_EDID_1920_1200, "2" },
-	{ MB_TO_BYTES(512), MB_TO_BYTES(2048), 4, GVT_EDID_1920_1200, "1" },
+	{ MB_TO_BYTES(64), MB_TO_BYTES(384), 4, VGPU_WEIGHT(8), GVT_EDID_1024_768, "8" },
+	{ MB_TO_BYTES(128), MB_TO_BYTES(512), 4, VGPU_WEIGHT(4), GVT_EDID_1920_1200, "4" },
+	{ MB_TO_BYTES(256), MB_TO_BYTES(1024), 4, VGPU_WEIGHT(2), GVT_EDID_1920_1200, "2" },
+	{ MB_TO_BYTES(512), MB_TO_BYTES(2048), 4, VGPU_WEIGHT(1), GVT_EDID_1920_1200, "1" },
 };
 
 /**
@@ -120,6 +130,12 @@ int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
 		gvt->types[i].low_gm_size = vgpu_types[i].low_mm;
 		gvt->types[i].high_gm_size = vgpu_types[i].high_mm;
 		gvt->types[i].fence = vgpu_types[i].fence;
+
+		if (vgpu_types[i].weight < 1 ||
+					vgpu_types[i].weight > VGPU_MAX_WEIGHT)
+			return -EINVAL;
+
+		gvt->types[i].weight = vgpu_types[i].weight;
 		gvt->types[i].resolution = vgpu_types[i].edid;
 		gvt->types[i].avail_instance = min(low_avail / vgpu_types[i].low_mm,
 						   high_avail / vgpu_types[i].high_mm);
@@ -131,11 +147,12 @@ int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
 		sprintf(gvt->types[i].name, "GVTg_V5_%s",
 					vgpu_types[i].name);
 
-		gvt_dbg_core("type[%d]: %s avail %u low %u high %u fence %u res %s\n",
+		gvt_dbg_core("type[%d]: %s avail %u low %u high %u fence %u weight %u res %s\n",
 			     i, gvt->types[i].name,
 			     gvt->types[i].avail_instance,
 			     gvt->types[i].low_gm_size,
 			     gvt->types[i].high_gm_size, gvt->types[i].fence,
+			     gvt->types[i].weight,
 			     vgpu_edid_str(gvt->types[i].resolution));
 	}
 
@@ -179,20 +196,34 @@ static void intel_gvt_update_vgpu_types(struct intel_gvt *gvt)
 }
 
 /**
- * intel_gvt_destroy_vgpu - destroy a virtual GPU
+ * intel_gvt_active_vgpu - activate a virtual GPU
  * @vgpu: virtual GPU
  *
- * This function is called when user wants to destroy a virtual GPU.
+ * This function is called when user wants to activate a virtual GPU.
  *
  */
-void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
+void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu)
+{
+	mutex_lock(&vgpu->gvt->lock);
+	vgpu->active = true;
+	mutex_unlock(&vgpu->gvt->lock);
+}
+
+/**
+ * intel_gvt_deactive_vgpu - deactivate a virtual GPU
+ * @vgpu: virtual GPU
+ *
+ * This function is called when user wants to deactivate a virtual GPU.
+ * All virtual GPU runtime information will be destroyed.
+ *
+ */
+void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu)
 {
 	struct intel_gvt *gvt = vgpu->gvt;
 
 	mutex_lock(&gvt->lock);
 
 	vgpu->active = false;
-	idr_remove(&gvt->vgpu_idr, vgpu->id);
 
 	if (atomic_read(&vgpu->running_workload_num)) {
 		mutex_unlock(&gvt->lock);
@@ -201,6 +232,26 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
 	}
 
 	intel_vgpu_stop_schedule(vgpu);
+
+	mutex_unlock(&gvt->lock);
+}
+
+/**
+ * intel_gvt_destroy_vgpu - destroy a virtual GPU
+ * @vgpu: virtual GPU
+ *
+ * This function is called when user wants to destroy a virtual GPU.
+ *
+ */
+void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
+{
+	struct intel_gvt *gvt = vgpu->gvt;
+
+	mutex_lock(&gvt->lock);
+
+	WARN(vgpu->active, "vGPU is still active!\n");
+
+	idr_remove(&gvt->vgpu_idr, vgpu->id);
 	intel_vgpu_clean_sched_policy(vgpu);
 	intel_vgpu_clean_gvt_context(vgpu);
 	intel_vgpu_clean_execlist(vgpu);
@@ -216,6 +267,59 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
 	mutex_unlock(&gvt->lock);
 }
 
+#define IDLE_VGPU_IDR 0
+
+/**
+ * intel_gvt_create_idle_vgpu - create an idle virtual GPU
+ * @gvt: GVT device
+ *
+ * This function is called when user wants to create an idle virtual GPU.
+ *
+ * Returns:
+ * pointer to intel_vgpu, error pointer if failed.
+ */
+struct intel_vgpu *intel_gvt_create_idle_vgpu(struct intel_gvt *gvt)
+{
+	struct intel_vgpu *vgpu;
+	enum intel_engine_id i;
+	int ret;
+
+	vgpu = vzalloc(sizeof(*vgpu));
+	if (!vgpu)
+		return ERR_PTR(-ENOMEM);
+
+	vgpu->id = IDLE_VGPU_IDR;
+	vgpu->gvt = gvt;
+
+	for (i = 0; i < I915_NUM_ENGINES; i++)
+		INIT_LIST_HEAD(&vgpu->workload_q_head[i]);
+
+	ret = intel_vgpu_init_sched_policy(vgpu);
+	if (ret)
+		goto out_free_vgpu;
+
+	vgpu->active = false;
+
+	return vgpu;
+
+out_free_vgpu:
+	vfree(vgpu);
+	return ERR_PTR(ret);
+}
+
+/**
+ * intel_gvt_destroy_vgpu - destroy an idle virtual GPU
+ * @vgpu: virtual GPU
+ *
+ * This function is called when user wants to destroy an idle virtual GPU.
+ *
+ */
+void intel_gvt_destroy_idle_vgpu(struct intel_vgpu *vgpu)
+{
+	intel_vgpu_clean_sched_policy(vgpu);
+	vfree(vgpu);
+}
+
 static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
 		struct intel_vgpu_creation_params *param)
 {
@@ -232,13 +336,15 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
 
 	mutex_lock(&gvt->lock);
 
-	ret = idr_alloc(&gvt->vgpu_idr, vgpu, 1, GVT_MAX_VGPU, GFP_KERNEL);
+	ret = idr_alloc(&gvt->vgpu_idr, vgpu, IDLE_VGPU_IDR + 1, GVT_MAX_VGPU,
+		GFP_KERNEL);
 	if (ret < 0)
 		goto out_free_vgpu;
 
 	vgpu->id = ret;
 	vgpu->handle = param->handle;
 	vgpu->gvt = gvt;
+	vgpu->sched_ctl.weight = param->weight;
 	bitmap_zero(vgpu->tlb_handle_pending, I915_NUM_ENGINES);
 
 	intel_vgpu_init_cfg_space(vgpu, param->primary);
@@ -277,7 +383,6 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
 	if (ret)
 		goto out_clean_shadow_ctx;
 
-	vgpu->active = true;
 	mutex_unlock(&gvt->lock);
 
 	return vgpu;
@@ -325,6 +430,7 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
 	param.low_gm_sz = type->low_gm_size;
 	param.high_gm_sz = type->high_gm_size;
 	param.fence_sz = type->fence;
+	param.weight = type->weight;
 	param.resolution = type->resolution;
 
 	/* XXX current param based on MB */
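VGPU_WEIGHT(vgpu_num) divides the maximum weight by the number of instances of a type that fit on one GPU, so each type's share is proportional to its size. Worked through for the fixed type table above:

	#include <stdio.h>

	#define VGPU_MAX_WEIGHT 16
	#define VGPU_WEIGHT(vgpu_num)	(VGPU_MAX_WEIGHT / (vgpu_num))

	int main(void)
	{
		int nums[] = { 8, 4, 2, 1 };	/* instances per GPU, per type */
		int i;

		for (i = 0; i < 4; i++)
			printf("GVTg_V5_%d: weight %d\n",
			       nums[i], VGPU_WEIGHT(nums[i]));
		/* weights 2, 4, 8, 16 -- all inside the 1..16 range that
		 * intel_gvt_init_vgpu_types() now validates. */
		return 0;
	}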
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 47e707d83c4d..d689e511744e 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -1012,9 +1012,12 @@ static int gpu_state_release(struct inode *inode, struct file *file)
 
 static int i915_gpu_info_open(struct inode *inode, struct file *file)
 {
+	struct drm_i915_private *i915 = inode->i_private;
 	struct i915_gpu_state *gpu;
 
-	gpu = i915_capture_gpu_state(inode->i_private);
+	intel_runtime_pm_get(i915);
+	gpu = i915_capture_gpu_state(i915);
+	intel_runtime_pm_put(i915);
 	if (!gpu)
 		return -ENOMEM;
 
@@ -1459,16 +1462,14 @@ static int ironlake_drpc_info(struct seq_file *m)
 
 static int i915_forcewake_domains(struct seq_file *m, void *data)
 {
-	struct drm_i915_private *dev_priv = node_to_i915(m->private);
+	struct drm_i915_private *i915 = node_to_i915(m->private);
 	struct intel_uncore_forcewake_domain *fw_domain;
+	unsigned int tmp;
 
-	spin_lock_irq(&dev_priv->uncore.lock);
-	for_each_fw_domain(fw_domain, dev_priv) {
+	for_each_fw_domain(fw_domain, i915, tmp)
 		seq_printf(m, "%s.wake_count = %u\n",
 			   intel_uncore_forcewake_domain_to_str(fw_domain->id),
-			   fw_domain->wake_count);
-	}
-	spin_unlock_irq(&dev_priv->uncore.lock);
+			   READ_ONCE(fw_domain->wake_count));
 
 	return 0;
 }
@@ -1938,9 +1939,8 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
 
 static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring)
 {
-	seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, last head: %d)",
-		   ring->space, ring->head, ring->tail,
-		   ring->last_retired_head);
+	seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u)",
+		   ring->space, ring->head, ring->tail);
 }
 
 static int i915_context_status(struct seq_file *m, void *unused)
@@ -2474,9 +2474,9 @@ static void i915_guc_client_info(struct seq_file *m,
 	enum intel_engine_id id;
 	uint64_t tot = 0;
 
-	seq_printf(m, "\tPriority %d, GuC ctx index: %u, PD offset 0x%x\n",
-		client->priority, client->ctx_index, client->proc_desc_offset);
-	seq_printf(m, "\tDoorbell id %d, offset: 0x%x, cookie 0x%x\n",
+	seq_printf(m, "\tPriority %d, GuC stage index: %u, PD offset 0x%x\n",
+		client->priority, client->stage_id, client->proc_desc_offset);
+	seq_printf(m, "\tDoorbell id %d, offset: 0x%lx, cookie 0x%x\n",
 		client->doorbell_id, client->doorbell_offset, client->doorbell_cookie);
 	seq_printf(m, "\tWQ size %d, offset: 0x%x, tail %d\n",
 		client->wq_size, client->wq_offset, client->wq_tail);
@@ -2511,7 +2511,7 @@ static int i915_guc_info(struct seq_file *m, void *data)
 	}
 
 	seq_printf(m, "Doorbell map:\n");
-	seq_printf(m, "\t%*pb\n", GUC_MAX_DOORBELLS, guc->doorbell_bitmap);
+	seq_printf(m, "\t%*pb\n", GUC_NUM_DOORBELLS, guc->doorbell_bitmap);
 	seq_printf(m, "Doorbell next cacheline: 0x%x\n\n", guc->db_cacheline);
 
 	seq_printf(m, "GuC total action count: %llu\n", guc->action_count);
@@ -4129,7 +4129,9 @@ i915_wedged_get(void *data, u64 *val)
 static int
 i915_wedged_set(void *data, u64 val)
 {
-	struct drm_i915_private *dev_priv = data;
+	struct drm_i915_private *i915 = data;
+	struct intel_engine_cs *engine;
+	unsigned int tmp;
 
 	/*
 	 * There is no safeguard against this debugfs entry colliding
@@ -4139,13 +4141,17 @@ i915_wedged_set(void *data, u64 val)
 	 * while it is writing to 'i915_wedged'
 	 */
 
-	if (i915_reset_backoff(&dev_priv->gpu_error))
+	if (i915_reset_backoff(&i915->gpu_error))
 		return -EAGAIN;
 
-	i915_handle_error(dev_priv, val,
-			  "Manually setting wedged to %llu", val);
+	for_each_engine_masked(engine, i915, val, tmp) {
+		engine->hangcheck.seqno = intel_engine_get_seqno(engine);
+		engine->hangcheck.stalled = true;
+	}
+
+	i915_handle_error(i915, val, "Manually setting wedged to %llu", val);
 
-	wait_on_bit(&dev_priv->gpu_error.flags,
+	wait_on_bit(&i915->gpu_error.flags,
 		    I915_RESET_HANDOFF,
 		    TASK_UNINTERRUPTIBLE);
 
@@ -4173,10 +4179,6 @@ fault_irq_set(struct drm_i915_private *i915,
 	if (err)
 		goto err_unlock;
 
-	/* Retire to kick idle work */
-	i915_gem_retire_requests(i915);
-	GEM_BUG_ON(i915->gt.active_requests);
-
 	*irq = val;
 	mutex_unlock(&i915->drm.struct_mutex);
 
@@ -4280,7 +4282,7 @@ i915_drop_caches_set(void *data, u64 val)
 		goto unlock;
 	}
 
-	if (val & (DROP_RETIRE | DROP_ACTIVE))
+	if (val & DROP_RETIRE)
 		i915_gem_retire_requests(dev_priv);
 
 	lockdep_set_current_reclaim_state(GFP_KERNEL);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 98b17070a123..3036d4835b0f 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -549,6 +549,7 @@ static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
 static void i915_gem_fini(struct drm_i915_private *dev_priv)
 {
 	mutex_lock(&dev_priv->drm.struct_mutex);
+	intel_uc_fini_hw(dev_priv);
 	i915_gem_cleanup_engines(dev_priv);
 	i915_gem_context_fini(dev_priv);
 	mutex_unlock(&dev_priv->drm.struct_mutex);
@@ -609,7 +610,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
 
 	ret = i915_gem_init(dev_priv);
 	if (ret)
-		goto cleanup_irq;
+		goto cleanup_uc;
 
 	intel_modeset_gem_init(dev);
 
@@ -631,9 +632,9 @@ cleanup_gem:
 	if (i915_gem_suspend(dev_priv))
 		DRM_ERROR("failed to idle hardware; continuing to unload!\n");
 	i915_gem_fini(dev_priv);
+cleanup_uc:
+	intel_uc_fini_fw(dev_priv);
 cleanup_irq:
-	intel_guc_fini(dev_priv);
-	intel_huc_fini(dev_priv);
 	drm_irq_uninstall(dev);
 	intel_teardown_gmbus(dev_priv);
 cleanup_csr:
@@ -1351,9 +1352,8 @@ void i915_driver_unload(struct drm_device *dev)
 	/* Flush any outstanding unpin_work. */
 	drain_workqueue(dev_priv->wq);
 
-	intel_guc_fini(dev_priv);
-	intel_huc_fini(dev_priv);
 	i915_gem_fini(dev_priv);
+	intel_uc_fini_fw(dev_priv);
 	intel_fbc_cleanup_cfb(dev_priv);
 
 	intel_power_domains_fini(dev_priv);
@@ -1469,8 +1469,6 @@ static int i915_drm_suspend(struct drm_device *dev)
 		goto out;
 	}
 
-	intel_guc_suspend(dev_priv);
-
 	intel_display_suspend(dev);
 
 	intel_dp_mst_suspend(dev);
@@ -2177,6 +2175,20 @@ static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv)
 	I915_WRITE(VLV_GUNIT_CLOCK_GATE2, s->clock_gate_dis2);
 }
 
+static int vlv_wait_for_pw_status(struct drm_i915_private *dev_priv,
+				  u32 mask, u32 val)
+{
+	/* The HW does not like us polling for PW_STATUS frequently, so
+	 * use the sleeping loop rather than risk the busy spin within
+	 * intel_wait_for_register().
+	 *
+	 * Transitioning between RC6 states should be at most 2ms (see
+	 * valleyview_enable_rps) so use a 3ms timeout.
+	 */
+	return wait_for((I915_READ_NOTRACE(VLV_GTLC_PW_STATUS) & mask) == val,
+			3);
+}
+
 int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on)
 {
 	u32 val;
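The new vlv_wait_for_pw_status() helper centralises the sleeping-poll pattern described in its comment. As a rough standalone sketch of that shape (the check callback, interval and timeout budget here are illustrative stand-ins, not the driver's API):

/* Sketch only: re-read a status condition at a low rate instead of
 * busy-spinning, giving up after a small timeout budget. The final
 * re-check mirrors the semantics of the kernel's wait_for() macro. */
static int sleeping_poll_sketch(bool (*status_ok)(void), unsigned int timeout_ms)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);

	for (;;) {
		if (status_ok())
			return 0;
		if (time_after(jiffies, timeout))
			return status_ok() ? 0 : -ETIMEDOUT;
		usleep_range(500, 1000);	/* sleep between reads */
	}
}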
@@ -2205,8 +2217,9 @@ int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on)
 
 static int vlv_allow_gt_wake(struct drm_i915_private *dev_priv, bool allow)
 {
+	u32 mask;
 	u32 val;
-	int err = 0;
+	int err;
 
 	val = I915_READ(VLV_GTLC_WAKE_CTRL);
 	val &= ~VLV_GTLC_ALLOWWAKEREQ;
@@ -2215,45 +2228,32 @@ static int vlv_allow_gt_wake(struct drm_i915_private *dev_priv, bool allow)
 	I915_WRITE(VLV_GTLC_WAKE_CTRL, val);
 	POSTING_READ(VLV_GTLC_WAKE_CTRL);
 
-	err = intel_wait_for_register(dev_priv,
-				      VLV_GTLC_PW_STATUS,
-				      VLV_GTLC_ALLOWWAKEACK,
-				      allow,
-				      1);
+	mask = VLV_GTLC_ALLOWWAKEACK;
+	val = allow ? mask : 0;
+
+	err = vlv_wait_for_pw_status(dev_priv, mask, val);
 	if (err)
 		DRM_ERROR("timeout disabling GT waking\n");
 
 	return err;
 }
 
-static int vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv,
-				 bool wait_for_on)
+static void vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv,
+				  bool wait_for_on)
 {
 	u32 mask;
 	u32 val;
-	int err;
 
 	mask = VLV_GTLC_PW_MEDIA_STATUS_MASK | VLV_GTLC_PW_RENDER_STATUS_MASK;
 	val = wait_for_on ? mask : 0;
-	if ((I915_READ(VLV_GTLC_PW_STATUS) & mask) == val)
-		return 0;
-
-	DRM_DEBUG_KMS("waiting for GT wells to go %s (%08x)\n",
-		      onoff(wait_for_on),
-		      I915_READ(VLV_GTLC_PW_STATUS));
 
 	/*
 	 * RC6 transitioning can be delayed up to 2 msec (see
 	 * valleyview_enable_rps), use 3 msec for safety.
 	 */
-	err = intel_wait_for_register(dev_priv,
-				      VLV_GTLC_PW_STATUS, mask, val,
-				      3);
-	if (err)
+	if (vlv_wait_for_pw_status(dev_priv, mask, val))
 		DRM_ERROR("timeout waiting for GT wells to go %s\n",
 			  onoff(wait_for_on));
-
-	return err;
 }
 
 static void vlv_check_no_gt_access(struct drm_i915_private *dev_priv)
@@ -2274,7 +2274,7 @@ static int vlv_suspend_complete(struct drm_i915_private *dev_priv)
 	 * Bspec defines the following GT well on flags as debug only, so
 	 * don't treat them as hard failures.
 	 */
-	(void)vlv_wait_for_gt_wells(dev_priv, false);
+	vlv_wait_for_gt_wells(dev_priv, false);
 
 	mask = VLV_GTLC_RENDER_CTX_EXISTS | VLV_GTLC_MEDIA_CTX_EXISTS;
 	WARN_ON((I915_READ(VLV_GTLC_WAKE_CTRL) & mask) != mask);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index a5947a496d0a..c9b0949f6c1a 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -79,26 +79,8 @@
 
 #define DRIVER_NAME		"i915"
 #define DRIVER_DESC		"Intel Graphics"
-#define DRIVER_DATE		"20170320"
-#define DRIVER_TIMESTAMP	1489994464
-
-#undef WARN_ON
-/* Many gcc seem to no see through this and fall over :( */
-#if 0
-#define WARN_ON(x) ({ \
-	bool __i915_warn_cond = (x); \
-	if (__builtin_constant_p(__i915_warn_cond)) \
-		BUILD_BUG_ON(__i915_warn_cond); \
-	WARN(__i915_warn_cond, "WARN_ON(" #x ")"); })
-#else
-#define WARN_ON(x) WARN((x), "%s", "WARN_ON(" __stringify(x) ")")
-#endif
-
-#undef WARN_ON_ONCE
-#define WARN_ON_ONCE(x) WARN_ONCE((x), "%s", "WARN_ON_ONCE(" __stringify(x) ")")
-
-#define MISSING_CASE(x) WARN(1, "Missing switch case (%lu) in %s\n", \
-			     (long) (x), __func__);
+#define DRIVER_DATE		"20170403"
+#define DRIVER_TIMESTAMP	1491198738
 
 /* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
  * WARN_ON()) for hw state sanity checks to check for unexpected conditions
@@ -703,9 +685,9 @@ enum forcewake_domain_id {
 };
 
 enum forcewake_domains {
-	FORCEWAKE_RENDER	= (1 << FW_DOMAIN_ID_RENDER),
-	FORCEWAKE_BLITTER	= (1 << FW_DOMAIN_ID_BLITTER),
-	FORCEWAKE_MEDIA		= (1 << FW_DOMAIN_ID_MEDIA),
+	FORCEWAKE_RENDER	= BIT(FW_DOMAIN_ID_RENDER),
+	FORCEWAKE_BLITTER	= BIT(FW_DOMAIN_ID_BLITTER),
+	FORCEWAKE_MEDIA		= BIT(FW_DOMAIN_ID_MEDIA),
 	FORCEWAKE_ALL = (FORCEWAKE_RENDER |
 			 FORCEWAKE_BLITTER |
 			 FORCEWAKE_MEDIA)
@@ -732,21 +714,25 @@ intel_uncore_forcewake_for_reg(struct drm_i915_private *dev_priv,
 
 struct intel_uncore_funcs {
 	void (*force_wake_get)(struct drm_i915_private *dev_priv,
 			       enum forcewake_domains domains);
 	void (*force_wake_put)(struct drm_i915_private *dev_priv,
 			       enum forcewake_domains domains);
 
-	uint8_t (*mmio_readb)(struct drm_i915_private *dev_priv, i915_reg_t r, bool trace);
-	uint16_t (*mmio_readw)(struct drm_i915_private *dev_priv, i915_reg_t r, bool trace);
-	uint32_t (*mmio_readl)(struct drm_i915_private *dev_priv, i915_reg_t r, bool trace);
-	uint64_t (*mmio_readq)(struct drm_i915_private *dev_priv, i915_reg_t r, bool trace);
-
-	void (*mmio_writeb)(struct drm_i915_private *dev_priv, i915_reg_t r,
-			    uint8_t val, bool trace);
-	void (*mmio_writew)(struct drm_i915_private *dev_priv, i915_reg_t r,
-			    uint16_t val, bool trace);
-	void (*mmio_writel)(struct drm_i915_private *dev_priv, i915_reg_t r,
-			    uint32_t val, bool trace);
+	uint8_t (*mmio_readb)(struct drm_i915_private *dev_priv,
+			      i915_reg_t r, bool trace);
+	uint16_t (*mmio_readw)(struct drm_i915_private *dev_priv,
+			       i915_reg_t r, bool trace);
+	uint32_t (*mmio_readl)(struct drm_i915_private *dev_priv,
+			       i915_reg_t r, bool trace);
+	uint64_t (*mmio_readq)(struct drm_i915_private *dev_priv,
+			       i915_reg_t r, bool trace);
+
+	void (*mmio_writeb)(struct drm_i915_private *dev_priv,
+			    i915_reg_t r, uint8_t val, bool trace);
+	void (*mmio_writew)(struct drm_i915_private *dev_priv,
+			    i915_reg_t r, uint16_t val, bool trace);
+	void (*mmio_writel)(struct drm_i915_private *dev_priv,
+			    i915_reg_t r, uint32_t val, bool trace);
 };
 
 struct intel_forcewake_range {
@@ -770,32 +756,35 @@ struct intel_uncore {
 	enum forcewake_domains fw_domains;
 	enum forcewake_domains fw_domains_active;
 
+	u32 fw_set;
+	u32 fw_clear;
+	u32 fw_reset;
+
 	struct intel_uncore_forcewake_domain {
-		struct drm_i915_private *i915;
 		enum forcewake_domain_id id;
 		enum forcewake_domains mask;
 		unsigned wake_count;
 		struct hrtimer timer;
 		i915_reg_t reg_set;
-		u32 val_set;
-		u32 val_clear;
 		i915_reg_t reg_ack;
-		i915_reg_t reg_post;
-		u32 val_reset;
 	} fw_domain[FW_DOMAIN_ID_COUNT];
 
 	int unclaimed_mmio_check;
 };
 
+#define __mask_next_bit(mask) ({ \
+	int __idx = ffs(mask) - 1; \
+	mask &= ~BIT(__idx); \
+	__idx; \
+})
+
 /* Iterate over initialised fw domains */
-#define for_each_fw_domain_masked(domain__, mask__, dev_priv__) \
-	for ((domain__) = &(dev_priv__)->uncore.fw_domain[0]; \
-	     (domain__) < &(dev_priv__)->uncore.fw_domain[FW_DOMAIN_ID_COUNT]; \
-	     (domain__)++) \
-		for_each_if ((mask__) & (domain__)->mask)
+#define for_each_fw_domain_masked(domain__, mask__, dev_priv__, tmp__) \
+	for (tmp__ = (mask__); \
+	     tmp__ ? (domain__ = &(dev_priv__)->uncore.fw_domain[__mask_next_bit(tmp__)]), 1 : 0;)
 
-#define for_each_fw_domain(domain__, dev_priv__) \
-	for_each_fw_domain_masked(domain__, FORCEWAKE_ALL, dev_priv__)
+#define for_each_fw_domain(domain__, dev_priv__, tmp__) \
+	for_each_fw_domain_masked(domain__, (dev_priv__)->uncore.fw_domains, dev_priv__, tmp__)
 
 #define CSR_VERSION(major, minor) ((major) << 16 | (minor))
 #define CSR_VERSION_MAJOR(version) ((version) >> 16)
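The rewritten iterator walks only the set bits of the domain mask via __mask_next_bit() instead of scanning every array slot. A self-contained userspace illustration of that bit-consuming loop (demo values only; ffs() here is the libc version):

#include <stdio.h>
#include <strings.h>	/* ffs() */

int main(void)
{
	unsigned int mask = 0x2c;	/* pretend domains 2, 3 and 5 are initialised */

	while (mask) {
		int idx = ffs(mask) - 1;	/* index of lowest set bit */
		mask &= ~(1u << idx);		/* consume it */
		printf("visiting fw_domain[%d]\n", idx);
	}
	return 0;
}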
@@ -846,6 +835,7 @@ struct intel_csr {
 	func(has_resource_streamer); \
 	func(has_runtime_pm); \
 	func(has_snoop); \
+	func(unfenced_needs_alignment); \
 	func(cursor_needs_physical); \
 	func(hws_needs_physical); \
 	func(overlay_needs_physical); \
@@ -2578,12 +2568,6 @@ static inline struct drm_i915_private *huc_to_i915(struct intel_huc *huc)
 	     (id__)++) \
 		for_each_if ((engine__) = (dev_priv__)->engine[(id__)])
 
-#define __mask_next_bit(mask) ({ \
-	int __idx = ffs(mask) - 1; \
-	mask &= ~BIT(__idx); \
-	__idx; \
-})
-
 /* Iterator over subset of engines selected by mask */
 #define for_each_engine_masked(engine__, dev_priv__, mask__, tmp__) \
 	for (tmp__ = mask__ & INTEL_INFO(dev_priv__)->ring_mask; \
@@ -3956,14 +3940,14 @@ u64 intel_rc6_residency_us(struct drm_i915_private *dev_priv,
 #define POSTING_READ16(reg)	(void)I915_READ16_NOTRACE(reg)
 
 #define __raw_read(x, s) \
-static inline uint##x##_t __raw_i915_read##x(struct drm_i915_private *dev_priv, \
+static inline uint##x##_t __raw_i915_read##x(const struct drm_i915_private *dev_priv, \
 					     i915_reg_t reg) \
 { \
 	return read##s(dev_priv->regs + i915_mmio_reg_offset(reg)); \
 }
 
 #define __raw_write(x, s) \
-static inline void __raw_i915_write##x(struct drm_i915_private *dev_priv, \
+static inline void __raw_i915_write##x(const struct drm_i915_private *dev_priv, \
 				       i915_reg_t reg, uint##x##_t val) \
 { \
 	write##s(val, dev_priv->regs + i915_mmio_reg_offset(reg)); \
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 58e1db77d70e..532a577ff7a1 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2321,7 +2321,7 @@ rebuild_st:
 		st->nents = 0;
 		for (i = 0; i < page_count; i++) {
 			page = shmem_read_mapping_page_gfp(mapping, i, gfp);
-			if (IS_ERR(page)) {
+			if (unlikely(IS_ERR(page))) {
 				i915_gem_shrink(dev_priv,
 						page_count,
 						I915_SHRINK_BOUND |
@@ -2329,12 +2329,21 @@ rebuild_st:
 						I915_SHRINK_PURGEABLE);
 			page = shmem_read_mapping_page_gfp(mapping, i, gfp);
 		}
-		if (IS_ERR(page)) {
+		if (unlikely(IS_ERR(page))) {
+			gfp_t reclaim;
+
 			/* We've tried hard to allocate the memory by reaping
 			 * our own buffer, now let the real VM do its job and
 			 * go down in flames if truly OOM.
+			 *
+			 * However, since graphics tend to be disposable,
+			 * defer the oom here by reporting the ENOMEM back
+			 * to userspace.
 			 */
-			page = shmem_read_mapping_page(mapping, i);
+			reclaim = mapping_gfp_mask(mapping);
+			reclaim |= __GFP_NORETRY; /* reclaim, but no oom */
+
+			page = shmem_read_mapping_page_gfp(mapping, i, reclaim);
 			if (IS_ERR(page)) {
 				ret = PTR_ERR(page);
 				goto err_sg;
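The allocation now degrades in stages: try the mapping's normal mask, reap the driver's own buffers, then retry with reclaim enabled but the OOM killer suppressed so the failure surfaces to userspace as ENOMEM. A hedged sketch of that ladder (reap_driver_caches() is a hypothetical stand-in for i915_gem_shrink()):

static struct page *alloc_page_laddered(struct address_space *mapping,
					pgoff_t index, gfp_t gfp)
{
	struct page *page;

	page = shmem_read_mapping_page_gfp(mapping, index, gfp);
	if (!IS_ERR(page))
		return page;

	reap_driver_caches();	/* hypothetical: free our own purgeable pages */

	/* Last try: allow reclaim, but report ENOMEM instead of invoking oom */
	gfp = mapping_gfp_mask(mapping) | __GFP_NORETRY;
	return shmem_read_mapping_page_gfp(mapping, index, gfp);
}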
@@ -2989,10 +2998,15 @@ void i915_gem_set_wedged(struct drm_i915_private *dev_priv)
 	lockdep_assert_held(&dev_priv->drm.struct_mutex);
 	set_bit(I915_WEDGED, &dev_priv->gpu_error.flags);
 
+	/* Retire completed requests first so the list of inflight/incomplete
+	 * requests is accurate and we don't try and mark successful requests
+	 * as in error during __i915_gem_set_wedged_BKL().
+	 */
+	i915_gem_retire_requests(dev_priv);
+
 	stop_machine(__i915_gem_set_wedged_BKL, dev_priv, NULL);
 
 	i915_gem_context_lost(dev_priv);
-	i915_gem_retire_requests(dev_priv);
 
 	mod_delayed_work(dev_priv->wq, &dev_priv->gt.idle_work, 0);
 }
@@ -3098,9 +3112,7 @@ i915_gem_idle_work_handler(struct work_struct *work)
 	 * Wait for last execlists context complete, but bail out in case a
 	 * new request is submitted.
 	 */
-	wait_for(READ_ONCE(dev_priv->gt.active_requests) ||
-		 intel_engines_are_idle(dev_priv),
-		 10);
+	wait_for(intel_engines_are_idle(dev_priv), 10);
 	if (READ_ONCE(dev_priv->gt.active_requests))
 		return;
 
@@ -3259,6 +3271,29 @@ static int wait_for_timeline(struct i915_gem_timeline *tl, unsigned int flags)
 	return 0;
 }
 
+static int wait_for_engine(struct intel_engine_cs *engine, int timeout_ms)
+{
+	return wait_for(intel_engine_is_idle(engine), timeout_ms);
+}
+
+static int wait_for_engines(struct drm_i915_private *i915)
+{
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+
+	for_each_engine(engine, i915, id) {
+		if (GEM_WARN_ON(wait_for_engine(engine, 50))) {
+			i915_gem_set_wedged(i915);
+			return -EIO;
+		}
+
+		GEM_BUG_ON(intel_engine_get_seqno(engine) !=
+			   intel_engine_last_submit(engine));
+	}
+
+	return 0;
+}
+
 int i915_gem_wait_for_idle(struct drm_i915_private *i915, unsigned int flags)
 {
 	int ret;
@@ -3273,13 +3308,16 @@ int i915_gem_wait_for_idle(struct drm_i915_private *i915, unsigned int flags)
 			if (ret)
 				return ret;
 		}
+
+		i915_gem_retire_requests(i915);
+		GEM_BUG_ON(i915->gt.active_requests);
+
+		ret = wait_for_engines(i915);
 	} else {
 		ret = wait_for_timeline(&i915->gt.global_timeline, flags);
-		if (ret)
-			return ret;
 	}
 
-	return 0;
+	return ret;
 }
 
 /** Flushes the GTT write domain for the object if it's dirty. */
@@ -3307,8 +3345,14 @@ i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
 	 * system agents we cannot reproduce this behaviour).
 	 */
 	wmb();
-	if (INTEL_GEN(dev_priv) >= 6 && !HAS_LLC(dev_priv))
-		POSTING_READ(RING_ACTHD(dev_priv->engine[RCS]->mmio_base));
+	if (INTEL_GEN(dev_priv) >= 6 && !HAS_LLC(dev_priv)) {
+		if (intel_runtime_pm_get_if_in_use(dev_priv)) {
+			spin_lock_irq(&dev_priv->uncore.lock);
+			POSTING_READ_FW(RING_ACTHD(dev_priv->engine[RCS]->mmio_base));
+			spin_unlock_irq(&dev_priv->uncore.lock);
+			intel_runtime_pm_put(dev_priv);
+		}
+	}
 
 	intel_fb_obj_flush(obj, write_origin(obj, I915_GEM_DOMAIN_GTT));
 
@@ -4408,13 +4452,12 @@ int i915_gem_suspend(struct drm_i915_private *dev_priv)
 	if (ret)
 		goto err_unlock;
 
-	i915_gem_retire_requests(dev_priv);
-	GEM_BUG_ON(dev_priv->gt.active_requests);
-
 	assert_kernel_context_is_current(dev_priv);
 	i915_gem_context_lost(dev_priv);
 	mutex_unlock(&dev->struct_mutex);
 
+	intel_guc_suspend(dev_priv);
+
 	cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
 	cancel_delayed_work_sync(&dev_priv->gt.retire_work);
 
diff --git a/drivers/gpu/drm/i915/i915_gem_clflush.c b/drivers/gpu/drm/i915/i915_gem_clflush.c
index d925fb582ba7..ffd01e02fe94 100644
--- a/drivers/gpu/drm/i915/i915_gem_clflush.c
+++ b/drivers/gpu/drm/i915/i915_gem_clflush.c
@@ -168,7 +168,7 @@ void i915_gem_clflush_object(struct drm_i915_gem_object *obj,
 
 	i915_sw_fence_await_reservation(&clflush->wait,
 					obj->resv, NULL,
-					false, I915_FENCE_TIMEOUT,
+					true, I915_FENCE_TIMEOUT,
 					GFP_KERNEL);
 
 	reservation_object_lock(obj->resv, NULL);
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 486051ed681d..8bd0c4966913 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -576,25 +576,25 @@ void i915_gem_context_close(struct drm_device *dev, struct drm_file *file)
 }
 
 static inline int
-mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
+mi_set_context(struct drm_i915_gem_request *req, u32 flags)
 {
 	struct drm_i915_private *dev_priv = req->i915;
 	struct intel_engine_cs *engine = req->engine;
 	enum intel_engine_id id;
-	u32 *cs, flags = hw_flags | MI_MM_SPACE_GTT;
 	const int num_rings =
-		/* Use an extended w/a on ivb+ if signalling from other rings */
-		i915.semaphores ?
+		/* Use an extended w/a on gen7 if signalling from other rings */
+		(i915.semaphores && INTEL_GEN(dev_priv) == 7) ?
 		INTEL_INFO(dev_priv)->num_rings - 1 :
 		0;
 	int len;
+	u32 *cs;
 
-	/* These flags are for resource streamer on HSW+ */
+	flags |= MI_MM_SPACE_GTT;
 	if (IS_HASWELL(dev_priv) || INTEL_GEN(dev_priv) >= 8)
-		flags |= (HSW_MI_RS_SAVE_STATE_EN | HSW_MI_RS_RESTORE_STATE_EN);
-	else if (INTEL_GEN(dev_priv) < 8)
-		flags |= (MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN);
-
+		/* These flags are for resource streamer on HSW+ */
+		flags |= HSW_MI_RS_SAVE_STATE_EN | HSW_MI_RS_RESTORE_STATE_EN;
+	else
+		flags |= MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN;
 
 	len = 4;
 	if (INTEL_GEN(dev_priv) >= 7)
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 11898cd97596..f225bf680b6d 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -200,10 +200,10 @@ static const struct dma_buf_ops i915_dmabuf_ops = {
 	.map_dma_buf = i915_gem_map_dma_buf,
 	.unmap_dma_buf = i915_gem_unmap_dma_buf,
 	.release = drm_gem_dmabuf_release,
-	.kmap = i915_gem_dmabuf_kmap,
-	.kmap_atomic = i915_gem_dmabuf_kmap_atomic,
-	.kunmap = i915_gem_dmabuf_kunmap,
-	.kunmap_atomic = i915_gem_dmabuf_kunmap_atomic,
+	.map = i915_gem_dmabuf_kmap,
+	.map_atomic = i915_gem_dmabuf_kmap_atomic,
+	.unmap = i915_gem_dmabuf_kunmap,
+	.unmap_atomic = i915_gem_dmabuf_kunmap_atomic,
 	.mmap = i915_gem_dmabuf_mmap,
 	.vmap = i915_gem_dmabuf_vmap,
 	.vunmap = i915_gem_dmabuf_vunmap,
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 2da3a94fc9f3..51e365f70464 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -196,7 +196,6 @@ search_again:
 	if (ret)
 		return ret;
 
-	i915_gem_retire_requests(dev_priv);
 	goto search_again;
 
 found:
@@ -383,7 +382,6 @@ int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
 		if (ret)
 			return ret;
 
-		i915_gem_retire_requests(dev_priv);
 		WARN_ON(!list_empty(&vm->active_list));
 	}
 
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index dd7181ed5eca..a3e59c8ef27b 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -890,6 +890,7 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *engine,
 	struct list_head ordered_vmas;
 	struct list_head pinned_vmas;
 	bool has_fenced_gpu_access = INTEL_GEN(engine->i915) < 4;
+	bool needs_unfenced_map = INTEL_INFO(engine->i915)->unfenced_needs_alignment;
 	int retry;
 
 	vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm;
@@ -910,7 +911,8 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *engine,
 		if (!has_fenced_gpu_access)
 			entry->flags &= ~EXEC_OBJECT_NEEDS_FENCE;
 		need_fence =
-			entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
+			(entry->flags & EXEC_OBJECT_NEEDS_FENCE ||
+			 needs_unfenced_map) &&
 			i915_gem_object_is_tiled(obj);
 		need_mappable = need_fence || need_reloc_mappable(vma);
 
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index cee9c4fec52a..8bab4aea63e6 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -2364,7 +2364,7 @@ void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 
 	if (unlikely(ggtt->do_idle_maps)) {
-		if (i915_gem_wait_for_idle(dev_priv, I915_WAIT_LOCKED)) {
+		if (i915_gem_wait_for_idle(dev_priv, 0)) {
 			DRM_ERROR("Failed to wait for idle; VT'd may hang.\n");
 			/* Wait a bit, in hopes it avoids the hang */
 			udelay(10);
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index 0e8d1010cecb..5ddbc9499775 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -37,6 +37,17 @@ static const char *i915_fence_get_driver_name(struct dma_fence *fence)
 
 static const char *i915_fence_get_timeline_name(struct dma_fence *fence)
 {
+	/* The timeline struct (as part of the ppgtt underneath a context)
+	 * may be freed when the request is no longer in use by the GPU.
+	 * We could extend the life of a context to beyond that of all
+	 * fences, possibly keeping the hw resource around indefinitely,
+	 * or we just give them a false name. Since
+	 * dma_fence_ops.get_timeline_name is a debug feature, the occasional
+	 * lie seems justifiable.
+	 */
+	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+		return "signaled";
+
 	return to_request(fence)->timeline->common->name;
 }
 
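Since get_timeline_name() is purely a debug hook, returning a fixed string for signaled fences avoids dereferencing a timeline that may already be freed. The guard reduces to this shape (sketch only; the live-name lookup is a placeholder, not the driver's real code path):

static const char *timeline_name_sketch(struct dma_fence *fence)
{
	/* Once signaled, never chase the (possibly freed) timeline. */
	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return "signaled";

	return "live-timeline";	/* placeholder for the real lookup */
}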
@@ -180,7 +191,6 @@ i915_priotree_init(struct i915_priotree *pt)
 
 static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno)
 {
-	struct i915_gem_timeline *timeline = &i915->gt.global_timeline;
 	struct intel_engine_cs *engine;
 	enum intel_engine_id id;
 	int ret;
@@ -192,15 +202,10 @@ static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno)
 	if (ret)
 		return ret;
 
-	i915_gem_retire_requests(i915);
-	GEM_BUG_ON(i915->gt.active_requests > 1);
-
 	/* If the seqno wraps around, we need to clear the breadcrumb rbtree */
 	for_each_engine(engine, i915, id) {
-		struct intel_timeline *tl = &timeline->engine[id];
-
-		if (wait_for(intel_engine_is_idle(engine), 50))
-			return -EBUSY;
+		struct i915_gem_timeline *timeline;
+		struct intel_timeline *tl = engine->timeline;
 
 		if (!i915_seqno_passed(seqno, tl->seqno)) {
 			/* spin until threads are complete */
@@ -211,14 +216,10 @@ static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno)
 		/* Finally reset hw state */
 		tl->seqno = seqno;
 		intel_engine_init_global_seqno(engine, seqno);
-	}
-
-	list_for_each_entry(timeline, &i915->gt.timelines, link) {
-		for_each_engine(engine, i915, id) {
-			struct intel_timeline *tl = &timeline->engine[id];
 
-			memset(tl->sync_seqno, 0, sizeof(tl->sync_seqno));
-		}
+		list_for_each_entry(timeline, &i915->gt.timelines, link)
+			memset(timeline->engine[id].sync_seqno, 0,
+			       sizeof(timeline->engine[id].sync_seqno));
 	}
 
 	return 0;
@@ -295,7 +296,7 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
 	 * completion order.
 	 */
 	list_del(&request->ring_link);
-	request->ring->last_retired_head = request->postfix;
+	request->ring->head = request->postfix;
 	if (!--request->i915->gt.active_requests) {
 		GEM_BUG_ON(!request->i915->gt.awake);
 		mod_delayed_work(request->i915->wq,
@@ -651,6 +652,9 @@ i915_gem_request_await_request(struct drm_i915_gem_request *to,
 
 	GEM_BUG_ON(to == from);
 
+	if (i915_gem_request_completed(from))
+		return 0;
+
 	if (to->engine->schedule) {
 		ret = i915_priotree_add_dependency(to->i915,
 						   &to->priotree,
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index 2978acdd995e..129ed303a6c4 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -53,6 +53,17 @@ static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
 	BUG();
 }
 
+static void i915_gem_shrinker_unlock(struct drm_device *dev, bool unlock)
+{
+	if (!unlock)
+		return;
+
+	mutex_unlock(&dev->struct_mutex);
+
+	/* expedite the RCU grace period to free some request slabs */
+	synchronize_rcu_expedited();
+}
+
 static bool any_vma_pinned(struct drm_i915_gem_object *obj)
 {
 	struct i915_vma *vma;
@@ -232,11 +243,8 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
 	intel_runtime_pm_put(dev_priv);
 
 	i915_gem_retire_requests(dev_priv);
-	if (unlock)
-		mutex_unlock(&dev_priv->drm.struct_mutex);
 
-	/* expedite the RCU grace period to free some request slabs */
-	synchronize_rcu_expedited();
+	i915_gem_shrinker_unlock(&dev_priv->drm, unlock);
 
 	return count;
 }
@@ -296,8 +304,7 @@ i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
 		count += obj->base.size >> PAGE_SHIFT;
 	}
 
-	if (unlock)
-		mutex_unlock(&dev->struct_mutex);
+	i915_gem_shrinker_unlock(dev, unlock);
 
 	return count;
 }
@@ -324,8 +331,8 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
 				sc->nr_to_scan - freed,
 				I915_SHRINK_BOUND |
 				I915_SHRINK_UNBOUND);
-	if (unlock)
-		mutex_unlock(&dev->struct_mutex);
+
+	i915_gem_shrinker_unlock(dev, unlock);
 
 	return freed;
 }
@@ -367,8 +374,7 @@ i915_gem_shrinker_unlock_uninterruptible(struct drm_i915_private *dev_priv,
 				      struct shrinker_lock_uninterruptible *slu)
 {
 	dev_priv->mm.interruptible = slu->was_interruptible;
-	if (slu->unlock)
-		mutex_unlock(&dev_priv->drm.struct_mutex);
+	i915_gem_shrinker_unlock(&dev_priv->drm, slu->unlock);
 }
 
 static int
diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
index 832ac9e45801..1642fff9cf13 100644
--- a/drivers/gpu/drm/i915/i915_guc_submission.c
+++ b/drivers/gpu/drm/i915/i915_guc_submission.c
@@ -30,16 +30,25 @@
 /**
  * DOC: GuC-based command submission
  *
- * i915_guc_client:
- * We use the term client to avoid confusion with contexts. A i915_guc_client is
- * equivalent to GuC object guc_context_desc. This context descriptor is
- * allocated from a pool of 1024 entries. Kernel driver will allocate doorbell
- * and workqueue for it. Also the process descriptor (guc_process_desc), which
- * is mapped to client space. So the client can write Work Item then ring the
- * doorbell.
+ * GuC client:
+ * A i915_guc_client refers to a submission path through GuC. Currently, there
+ * is only one of these (the execbuf_client) and this one is charged with all
+ * submissions to the GuC. This struct is the owner of a doorbell, a process
+ * descriptor and a workqueue (all of them inside a single gem object that
+ * contains all required pages for these elements).
  *
- * To simplify the implementation, we allocate one gem object that contains all
- * pages for doorbell, process descriptor and workqueue.
+ * GuC stage descriptor:
+ * During initialization, the driver allocates a static pool of 1024 such
+ * descriptors, and shares them with the GuC.
+ * Currently, there exists a 1:1 mapping between a i915_guc_client and a
+ * guc_stage_desc (via the client's stage_id), so effectively only one
+ * gets used. This stage descriptor lets the GuC know about the doorbell,
+ * workqueue and process descriptor. Theoretically, it also lets the GuC
+ * know about our HW contexts (context ID, etc...), but we actually
+ * employ a kind of submission where the GuC uses the LRCA sent via the work
+ * item instead (the single guc_stage_desc associated to execbuf client
+ * contains information about the default kernel context only, but this is
+ * essentially unused). This is called a "proxy" submission.
 *
 * The Scratch registers:
 * There are 16 MMIO-based registers start from 0xC180. The kernel driver writes
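The work-queue flow the comment describes: the driver writes a work item into the shared workqueue, publishes the new tail through the process descriptor, and rings the doorbell. A much-simplified sketch with stand-in types (none of these are the driver's real structures or field names):

#include <string.h>

#define WQ_SIZE 4096	/* illustrative workqueue size */

struct fake_proc_desc { unsigned int head, tail; };
struct fake_doorbell { unsigned int db_status, cookie; };

static void submit_wq_item(struct fake_proc_desc *desc,
			   struct fake_doorbell *db,
			   char *wq_base, const void *wqi, size_t wqi_size)
{
	memcpy(wq_base + desc->tail, wqi, wqi_size);
	desc->tail = (desc->tail + wqi_size) % WQ_SIZE;	/* publish new tail */
	db->cookie++;	/* "ring": the firmware notices the cookie change */
}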
@@ -62,34 +71,91 @@
  * ELSP context descriptor dword into Work Item.
  * See guc_wq_item_append()
  *
+ * ADS:
+ * The Additional Data Struct (ADS) has pointers for different buffers used by
+ * the GuC. One single gem object contains the ADS struct itself (guc_ads), the
+ * scheduling policies (guc_policies), a structure describing a collection of
+ * register sets (guc_mmio_reg_state) and some extra pages for the GuC to save
+ * its internal state for sleep.
+ *
  */
 
+static inline bool is_high_priority(struct i915_guc_client* client)
+{
+	return client->priority <= GUC_CLIENT_PRIORITY_HIGH;
+}
+
+static int __reserve_doorbell(struct i915_guc_client *client)
+{
+	unsigned long offset;
+	unsigned long end;
+	u16 id;
+
+	GEM_BUG_ON(client->doorbell_id != GUC_DOORBELL_INVALID);
+
+	/*
+	 * The bitmap tracks which doorbell registers are currently in use.
+	 * It is split into two halves; the first half is used for normal
+	 * priority contexts, the second half for high-priority ones.
+	 */
+	offset = 0;
+	end = GUC_NUM_DOORBELLS/2;
+	if (is_high_priority(client)) {
+		offset = end;
+		end += offset;
+	}
+
+	id = find_next_zero_bit(client->guc->doorbell_bitmap, offset, end);
+	if (id == end)
+		return -ENOSPC;
+
+	__set_bit(id, client->guc->doorbell_bitmap);
+	client->doorbell_id = id;
+	DRM_DEBUG_DRIVER("client %u (high prio=%s) reserved doorbell: %d\n",
+			 client->stage_id, yesno(is_high_priority(client)),
+			 id);
+	return 0;
+}
+
+static void __unreserve_doorbell(struct i915_guc_client *client)
+{
+	GEM_BUG_ON(client->doorbell_id == GUC_DOORBELL_INVALID);
+
+	__clear_bit(client->doorbell_id, client->guc->doorbell_bitmap);
+	client->doorbell_id = GUC_DOORBELL_INVALID;
+}
+
 /*
  * Tell the GuC to allocate or deallocate a specific doorbell
  */
 
-static int guc_allocate_doorbell(struct intel_guc *guc,
-				 struct i915_guc_client *client)
+static int __guc_allocate_doorbell(struct intel_guc *guc, u32 stage_id)
 {
 	u32 action[] = {
 		INTEL_GUC_ACTION_ALLOCATE_DOORBELL,
-		client->ctx_index
+		stage_id
 	};
 
 	return intel_guc_send(guc, action, ARRAY_SIZE(action));
 }
 
-static int guc_release_doorbell(struct intel_guc *guc,
-				struct i915_guc_client *client)
+static int __guc_deallocate_doorbell(struct intel_guc *guc, u32 stage_id)
 {
 	u32 action[] = {
 		INTEL_GUC_ACTION_DEALLOCATE_DOORBELL,
-		client->ctx_index
+		stage_id
 	};
 
 	return intel_guc_send(guc, action, ARRAY_SIZE(action));
 }
 
+static struct guc_stage_desc *__get_stage_desc(struct i915_guc_client *client)
+{
+	struct guc_stage_desc *base = client->guc->stage_desc_pool_vaddr;
+
+	return &base[client->stage_id];
+}
+
 /*
  * Initialise, update, or clear doorbell data shared with the GuC
  *
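__reserve_doorbell() partitions the doorbell ID space: the lower half serves normal-priority clients, the upper half high-priority ones. A standalone demo of that split-range allocation policy (simplified linear scan and demo sizes, not the kernel bitmap helpers):

#include <stdbool.h>
#include <stdio.h>

#define NUM_DOORBELLS 256

static bool in_use[NUM_DOORBELLS];

static int reserve_doorbell_id(bool high_priority)
{
	int start = high_priority ? NUM_DOORBELLS / 2 : 0;
	int end = start + NUM_DOORBELLS / 2;

	for (int id = start; id < end; id++) {
		if (!in_use[id]) {
			in_use[id] = true;
			return id;
		}
	}
	return -1;	/* the kernel version returns -ENOSPC */
}

int main(void)
{
	printf("normal: %d, high: %d\n",
	       reserve_doorbell_id(false), reserve_doorbell_id(true));
	return 0;
}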
@@ -97,107 +163,129 @@ static int guc_release_doorbell(struct intel_guc *guc,
  * client object which contains the page being used for the doorbell
  */
 
-static int guc_update_doorbell_id(struct intel_guc *guc,
-				  struct i915_guc_client *client,
-				  u16 new_id)
+static void __update_doorbell_desc(struct i915_guc_client *client, u16 new_id)
 {
-	struct sg_table *sg = guc->ctx_pool_vma->pages;
-	void *doorbell_bitmap = guc->doorbell_bitmap;
-	struct guc_doorbell_info *doorbell;
-	struct guc_context_desc desc;
-	size_t len;
+	struct guc_stage_desc *desc;
 
-	doorbell = client->vaddr + client->doorbell_offset;
+	/* Update the GuC's idea of the doorbell ID */
+	desc = __get_stage_desc(client);
+	desc->db_id = new_id;
+}
 
-	if (client->doorbell_id != GUC_INVALID_DOORBELL_ID &&
-	    test_bit(client->doorbell_id, doorbell_bitmap)) {
-		/* Deactivate the old doorbell */
-		doorbell->db_status = GUC_DOORBELL_DISABLED;
-		(void)guc_release_doorbell(guc, client);
-		__clear_bit(client->doorbell_id, doorbell_bitmap);
-	}
+static struct guc_doorbell_info *__get_doorbell(struct i915_guc_client *client)
+{
+	return client->vaddr + client->doorbell_offset;
+}
 
-	/* Update the GuC's idea of the doorbell ID */
-	len = sg_pcopy_to_buffer(sg->sgl, sg->nents, &desc, sizeof(desc),
-				 sizeof(desc) * client->ctx_index);
-	if (len != sizeof(desc))
-		return -EFAULT;
-	desc.db_id = new_id;
-	len = sg_pcopy_from_buffer(sg->sgl, sg->nents, &desc, sizeof(desc),
-				   sizeof(desc) * client->ctx_index);
-	if (len != sizeof(desc))
-		return -EFAULT;
-
-	client->doorbell_id = new_id;
-	if (new_id == GUC_INVALID_DOORBELL_ID)
-		return 0;
+static bool has_doorbell(struct i915_guc_client *client)
+{
+	if (client->doorbell_id == GUC_DOORBELL_INVALID)
+		return false;
 
-	/* Activate the new doorbell */
-	__set_bit(new_id, doorbell_bitmap);
+	return test_bit(client->doorbell_id, client->guc->doorbell_bitmap);
+}
+
+static int __create_doorbell(struct i915_guc_client *client)
+{
+	struct guc_doorbell_info *doorbell;
+	int err;
+
+	doorbell = __get_doorbell(client);
 	doorbell->db_status = GUC_DOORBELL_ENABLED;
 	doorbell->cookie = client->doorbell_cookie;
-	return guc_allocate_doorbell(guc, client);
+
+	err = __guc_allocate_doorbell(client->guc, client->stage_id);
+	if (err) {
+		doorbell->db_status = GUC_DOORBELL_DISABLED;
+		doorbell->cookie = 0;
+	}
+	return err;
 }
 
-static void guc_disable_doorbell(struct intel_guc *guc,
-				 struct i915_guc_client *client)
+static int __destroy_doorbell(struct i915_guc_client *client)
 {
-	(void)guc_update_doorbell_id(guc, client, GUC_INVALID_DOORBELL_ID);
+	struct drm_i915_private *dev_priv = guc_to_i915(client->guc);
+	struct guc_doorbell_info *doorbell;
+	u16 db_id = client->doorbell_id;
 
-	/* XXX: wait for any interrupts */
-	/* XXX: wait for workqueue to drain */
+	GEM_BUG_ON(db_id >= GUC_DOORBELL_INVALID);
+
+	doorbell = __get_doorbell(client);
+	doorbell->db_status = GUC_DOORBELL_DISABLED;
+	doorbell->cookie = 0;
+
+	/* Doorbell release flow requires that we wait for GEN8_DRB_VALID bit
+	 * to go to zero after updating db_status before we call the GuC to
+	 * release the doorbell */
+	if (wait_for_us(!(I915_READ(GEN8_DRBREGL(db_id)) & GEN8_DRB_VALID), 10))
+		WARN_ONCE(true, "Doorbell never became invalid after disable\n");
+
+	return __guc_deallocate_doorbell(client->guc, client->stage_id);
 }
 
-static uint16_t
-select_doorbell_register(struct intel_guc *guc, uint32_t priority)
+static int create_doorbell(struct i915_guc_client *client)
 {
-	/*
-	 * The bitmap tracks which doorbell registers are currently in use.
-	 * It is split into two halves; the first half is used for normal
-	 * priority contexts, the second half for high-priority ones.
-	 * Note that logically higher priorities are numerically less than
-	 * normal ones, so the test below means "is it high-priority?"
-	 */
-	const bool hi_pri = (priority <= GUC_CTX_PRIORITY_HIGH);
-	const uint16_t half = GUC_MAX_DOORBELLS / 2;
-	const uint16_t start = hi_pri ? half : 0;
-	const uint16_t end = start + half;
-	uint16_t id;
+	int ret;
 
-	id = find_next_zero_bit(guc->doorbell_bitmap, end, start);
-	if (id == end)
-		id = GUC_INVALID_DOORBELL_ID;
+	ret = __reserve_doorbell(client);
+	if (ret)
+		return ret;
 
-	DRM_DEBUG_DRIVER("assigned %s priority doorbell id 0x%x\n",
-			 hi_pri ? "high" : "normal", id);
+	__update_doorbell_desc(client, client->doorbell_id);
+
+	ret = __create_doorbell(client);
+	if (ret)
+		goto err;
+
+	return 0;
 
-	return id;
+err:
+	__update_doorbell_desc(client, GUC_DOORBELL_INVALID);
+	__unreserve_doorbell(client);
+	return ret;
 }
 
-/*
- * Select, assign and relase doorbell cachelines
- *
- * These functions track which doorbell cachelines are in use.
- * The data they manipulate is protected by the intel_guc_send lock.
- */
+static int destroy_doorbell(struct i915_guc_client *client)
+{
+	int err;
+
+	GEM_BUG_ON(!has_doorbell(client));
 
-static uint32_t select_doorbell_cacheline(struct intel_guc *guc)
+	/* XXX: wait for any interrupts */
+	/* XXX: wait for workqueue to drain */
+
+	err = __destroy_doorbell(client);
+	if (err)
+		return err;
+
+	__update_doorbell_desc(client, GUC_DOORBELL_INVALID);
+
+	__unreserve_doorbell(client);
+
+	return 0;
+}
+
+static unsigned long __select_cacheline(struct intel_guc* guc)
 {
-	const uint32_t cacheline_size = cache_line_size();
-	uint32_t offset;
+	unsigned long offset;
 
 	/* Doorbell uses a single cache line within a page */
 	offset = offset_in_page(guc->db_cacheline);
 
 	/* Moving to next cache line to reduce contention */
-	guc->db_cacheline += cacheline_size;
-
-	DRM_DEBUG_DRIVER("selected doorbell cacheline 0x%x, next 0x%x, linesize %u\n",
-			 offset, guc->db_cacheline, cacheline_size);
+	guc->db_cacheline += cache_line_size();
 
+	DRM_DEBUG_DRIVER("reserved cacheline 0x%lx, next 0x%x, linesize %u\n",
+			 offset, guc->db_cacheline, cache_line_size());
 	return offset;
 }
 
+static inline struct guc_process_desc *
+__get_process_desc(struct i915_guc_client *client)
+{
+	return client->vaddr + client->proc_desc_offset;
+}
+
 /*
  * Initialise the process descriptor shared with the GuC firmware.
  */
@@ -206,9 +294,7 @@ static void guc_proc_desc_init(struct intel_guc *guc,
 {
 	struct guc_process_desc *desc;
 
-	desc = client->vaddr + client->proc_desc_offset;
-
-	memset(desc, 0, sizeof(*desc));
+	desc = memset(__get_process_desc(client), 0, sizeof(*desc));
 
 	/*
 	 * XXX: pDoorbell and WQVBaseAddress are pointers in process address
@@ -219,42 +305,41 @@ static void guc_proc_desc_init(struct intel_guc *guc,
 	desc->wq_base_addr = 0;
 	desc->db_base_addr = 0;
 
-	desc->context_id = client->ctx_index;
+	desc->stage_id = client->stage_id;
 	desc->wq_size_bytes = client->wq_size;
 	desc->wq_status = WQ_STATUS_ACTIVE;
 	desc->priority = client->priority;
 }
 
 /*
- * Initialise/clear the context descriptor shared with the GuC firmware.
+ * Initialise/clear the stage descriptor shared with the GuC firmware.
  *
  * This descriptor tells the GuC where (in GGTT space) to find the important
  * data structures relating to this client (doorbell, process descriptor,
  * write queue, etc).
  */
-
-static void guc_ctx_desc_init(struct intel_guc *guc,
-			      struct i915_guc_client *client)
+static void guc_stage_desc_init(struct intel_guc *guc,
+				struct i915_guc_client *client)
 {
 	struct drm_i915_private *dev_priv = guc_to_i915(guc);
 	struct intel_engine_cs *engine;
 	struct i915_gem_context *ctx = client->owner;
-	struct guc_context_desc desc;
-	struct sg_table *sg;
+	struct guc_stage_desc *desc;
 	unsigned int tmp;
 	u32 gfx_addr;
 
-	memset(&desc, 0, sizeof(desc));
+	desc = __get_stage_desc(client);
+	memset(desc, 0, sizeof(*desc));
 
-	desc.attribute = GUC_CTX_DESC_ATTR_ACTIVE | GUC_CTX_DESC_ATTR_KERNEL;
-	desc.context_id = client->ctx_index;
-	desc.priority = client->priority;
-	desc.db_id = client->doorbell_id;
+	desc->attribute = GUC_STAGE_DESC_ATTR_ACTIVE | GUC_STAGE_DESC_ATTR_KERNEL;
+	desc->stage_id = client->stage_id;
+	desc->priority = client->priority;
+	desc->db_id = client->doorbell_id;
 
 	for_each_engine_masked(engine, dev_priv, client->engines, tmp) {
 		struct intel_context *ce = &ctx->engine[engine->id];
 		uint32_t guc_engine_id = engine->guc_id;
-		struct guc_execlist_context *lrc = &desc.lrc[guc_engine_id];
+		struct guc_execlist_context *lrc = &desc->lrc[guc_engine_id];
 
 		/* TODO: We have a design issue to be solved here. Only when we
 		 * receive the first batch, we know which engine is used by the
@@ -266,12 +351,22 @@ static void guc_ctx_desc_init(struct intel_guc *guc,
 		if (!ce->state)
 			break;	/* XXX: continue? */
 
+		/*
+		 * XXX: When this is a GUC_STAGE_DESC_ATTR_KERNEL client (proxy
+		 * submission or, in other words, not using a direct submission
+		 * model) the KMD's LRCA is not used for any work submission.
+		 * Instead, the GuC uses the LRCA of the user mode context (see
+		 * guc_wq_item_append below).
+		 */
 		lrc->context_desc = lower_32_bits(ce->lrc_desc);
 
 		/* The state page is after PPHWSP */
-		lrc->ring_lcra =
+		lrc->ring_lrca =
 			guc_ggtt_offset(ce->state) + LRC_STATE_PN * PAGE_SIZE;
-		lrc->context_id = (client->ctx_index << GUC_ELC_CTXID_OFFSET) |
+
+		/* XXX: In direct submission, the GuC wants the HW context id
+		 * here. In proxy submission, it wants the stage id */
+		lrc->context_id = (client->stage_id << GUC_ELC_CTXID_OFFSET) |
 			(guc_engine_id << GUC_ELC_ENGINE_OFFSET);
 
 		lrc->ring_begin = guc_ggtt_offset(ce->ring->vma);
@@ -279,50 +374,36 @@ static void guc_ctx_desc_init(struct intel_guc *guc,
279 lrc->ring_next_free_location = lrc->ring_begin; 374 lrc->ring_next_free_location = lrc->ring_begin;
280 lrc->ring_current_tail_pointer_value = 0; 375 lrc->ring_current_tail_pointer_value = 0;
281 376
282 desc.engines_used |= (1 << guc_engine_id); 377 desc->engines_used |= (1 << guc_engine_id);
283 } 378 }
284 379
285 DRM_DEBUG_DRIVER("Host engines 0x%x => GuC engines used 0x%x\n", 380 DRM_DEBUG_DRIVER("Host engines 0x%x => GuC engines used 0x%x\n",
286 client->engines, desc.engines_used); 381 client->engines, desc->engines_used);
287 WARN_ON(desc.engines_used == 0); 382 WARN_ON(desc->engines_used == 0);
288 383
289 /* 384 /*
290 * The doorbell, process descriptor, and workqueue are all parts 385 * The doorbell, process descriptor, and workqueue are all parts
291 * of the client object, which the GuC will reference via the GGTT 386 * of the client object, which the GuC will reference via the GGTT
292 */ 387 */
293 gfx_addr = guc_ggtt_offset(client->vma); 388 gfx_addr = guc_ggtt_offset(client->vma);
294 desc.db_trigger_phy = sg_dma_address(client->vma->pages->sgl) + 389 desc->db_trigger_phy = sg_dma_address(client->vma->pages->sgl) +
295 client->doorbell_offset; 390 client->doorbell_offset;
296 desc.db_trigger_cpu = 391 desc->db_trigger_cpu = (uintptr_t)__get_doorbell(client);
297 (uintptr_t)client->vaddr + client->doorbell_offset; 392 desc->db_trigger_uk = gfx_addr + client->doorbell_offset;
298 desc.db_trigger_uk = gfx_addr + client->doorbell_offset; 393 desc->process_desc = gfx_addr + client->proc_desc_offset;
299 desc.process_desc = gfx_addr + client->proc_desc_offset; 394 desc->wq_addr = gfx_addr + client->wq_offset;
300 desc.wq_addr = gfx_addr + client->wq_offset; 395 desc->wq_size = client->wq_size;
301 desc.wq_size = client->wq_size;
302
303 /*
304 * XXX: Take LRCs from an existing context if this is not an
305 * IsKMDCreatedContext client
306 */
307 desc.desc_private = (uintptr_t)client;
308 396
309 /* Pool context is pinned already */ 397 desc->desc_private = (uintptr_t)client;
310 sg = guc->ctx_pool_vma->pages;
311 sg_pcopy_from_buffer(sg->sgl, sg->nents, &desc, sizeof(desc),
312 sizeof(desc) * client->ctx_index);
313} 398}
314 399
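The new code reads descriptors through __get_stage_desc() and __get_process_desc(), whose definitions lie outside this excerpt. A minimal sketch of what such helpers could look like, assuming (as the surrounding hunks suggest) that the descriptor pool stays permanently mapped at guc->stage_desc_pool_vaddr and that the process descriptor sits at proc_desc_offset inside the client's first, permanently kmap'd page:

	static struct guc_stage_desc *__get_stage_desc(struct i915_guc_client *client)
	{
		struct guc_stage_desc *base = client->guc->stage_desc_pool_vaddr;

		/* one descriptor per stage_id in the permanently mapped pool */
		return &base[client->stage_id];
	}

	static struct guc_process_desc *__get_process_desc(struct i915_guc_client *client)
	{
		/* the process descriptor lives in the first client page */
		return client->vaddr + client->proc_desc_offset;
	}
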
315static void guc_ctx_desc_fini(struct intel_guc *guc, 400static void guc_stage_desc_fini(struct intel_guc *guc,
316 struct i915_guc_client *client) 401 struct i915_guc_client *client)
317{ 402{
318 struct guc_context_desc desc; 403 struct guc_stage_desc *desc;
319 struct sg_table *sg;
320
321 memset(&desc, 0, sizeof(desc));
322 404
323 sg = guc->ctx_pool_vma->pages; 405 desc = __get_stage_desc(client);
324 sg_pcopy_from_buffer(sg->sgl, sg->nents, &desc, sizeof(desc), 406 memset(desc, 0, sizeof(*desc));
325 sizeof(desc) * client->ctx_index);
326} 407}
327 408
328/** 409/**
@@ -345,8 +426,7 @@ int i915_guc_wq_reserve(struct drm_i915_gem_request *request)
345{ 426{
346 const size_t wqi_size = sizeof(struct guc_wq_item); 427 const size_t wqi_size = sizeof(struct guc_wq_item);
347 struct i915_guc_client *client = request->i915->guc.execbuf_client; 428 struct i915_guc_client *client = request->i915->guc.execbuf_client;
348 struct guc_process_desc *desc = client->vaddr + 429 struct guc_process_desc *desc = __get_process_desc(client);
349 client->proc_desc_offset;
350 u32 freespace; 430 u32 freespace;
351 int ret; 431 int ret;
352 432
@@ -391,19 +471,17 @@ static void guc_wq_item_append(struct i915_guc_client *client,
391 const size_t wqi_size = sizeof(struct guc_wq_item); 471 const size_t wqi_size = sizeof(struct guc_wq_item);
392 const u32 wqi_len = wqi_size/sizeof(u32) - 1; 472 const u32 wqi_len = wqi_size/sizeof(u32) - 1;
393 struct intel_engine_cs *engine = rq->engine; 473 struct intel_engine_cs *engine = rq->engine;
394 struct guc_process_desc *desc; 474 struct guc_process_desc *desc = __get_process_desc(client);
395 struct guc_wq_item *wqi; 475 struct guc_wq_item *wqi;
396 u32 freespace, tail, wq_off; 476 u32 freespace, tail, wq_off;
397 477
398 desc = client->vaddr + client->proc_desc_offset;
399
400 /* Free space is guaranteed, see i915_guc_wq_reserve() above */ 478 /* Free space is guaranteed, see i915_guc_wq_reserve() above */
401 freespace = CIRC_SPACE(client->wq_tail, desc->head, client->wq_size); 479 freespace = CIRC_SPACE(client->wq_tail, desc->head, client->wq_size);
402 GEM_BUG_ON(freespace < wqi_size); 480 GEM_BUG_ON(freespace < wqi_size);
403 481
404 /* The GuC firmware wants the tail index in QWords, not bytes */ 482 /* The GuC firmware wants the tail index in QWords, not bytes */
405 tail = rq->tail; 483 tail = rq->tail;
406 GEM_BUG_ON(tail & 7); 484 assert_ring_tail_valid(rq->ring, rq->tail);
407 tail >>= 3; 485 tail >>= 3;
408 GEM_BUG_ON(tail > WQ_RING_TAIL_MAX); 486 GEM_BUG_ON(tail > WQ_RING_TAIL_MAX);
409 487
@@ -436,19 +514,27 @@ static void guc_wq_item_append(struct i915_guc_client *client,
436 /* The GuC wants only the low-order word of the context descriptor */ 514 /* The GuC wants only the low-order word of the context descriptor */
437 wqi->context_desc = (u32)intel_lr_context_descriptor(rq->ctx, engine); 515 wqi->context_desc = (u32)intel_lr_context_descriptor(rq->ctx, engine);
438 516
439 wqi->ring_tail = tail << WQ_RING_TAIL_SHIFT; 517 wqi->submit_element_info = tail << WQ_RING_TAIL_SHIFT;
440 wqi->fence_id = rq->global_seqno; 518 wqi->fence_id = rq->global_seqno;
441} 519}
442 520
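Two details in guc_wq_item_append() are easy to misread: the free-space check uses the stock circular-buffer helper from <linux/circ_buf.h>, and the ring tail is converted from a byte offset into a QWord index before being packed into the work-queue item. A worked illustration with made-up values:

	#include <linux/circ_buf.h>

	/* hypothetical snapshot: 4 KiB work queue, producer at 0x40, consumer at 0x10 */
	u32 wq_tail = 0x40, head = 0x10, wq_size = 0x1000;
	u32 freespace = CIRC_SPACE(wq_tail, head, wq_size);	/* 0xfcf bytes free */

	/* rq->tail is a QWord-aligned byte offset; the GuC wants a QWord index */
	u32 tail = 0x40;
	tail >>= 3;						/* 8 QWords */
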
521static void guc_reset_wq(struct i915_guc_client *client)
522{
523 struct guc_process_desc *desc = __get_process_desc(client);
524
525 desc->head = 0;
526 desc->tail = 0;
527
528 client->wq_tail = 0;
529}
530
443static int guc_ring_doorbell(struct i915_guc_client *client) 531static int guc_ring_doorbell(struct i915_guc_client *client)
444{ 532{
445 struct guc_process_desc *desc; 533 struct guc_process_desc *desc = __get_process_desc(client);
446 union guc_doorbell_qw db_cmp, db_exc, db_ret; 534 union guc_doorbell_qw db_cmp, db_exc, db_ret;
447 union guc_doorbell_qw *db; 535 union guc_doorbell_qw *db;
448 int attempt = 2, ret = -EAGAIN; 536 int attempt = 2, ret = -EAGAIN;
449 537
450 desc = client->vaddr + client->proc_desc_offset;
451
452 /* Update the tail so it is visible to GuC */ 538 /* Update the tail so it is visible to GuC */
453 desc->tail = client->wq_tail; 539 desc->tail = client->wq_tail;
454 540
@@ -463,7 +549,7 @@ static int guc_ring_doorbell(struct i915_guc_client *client)
463 db_exc.cookie = 1; 549 db_exc.cookie = 1;
464 550
465 /* pointer of current doorbell cacheline */ 551 /* pointer of current doorbell cacheline */
466 db = client->vaddr + client->doorbell_offset; 552 db = (union guc_doorbell_qw *)__get_doorbell(client);
467 553
468 while (attempt--) { 554 while (attempt--) {
469 /* let's ring the doorbell */ 555 /* let's ring the doorbell */
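The body of the retry loop is truncated by the hunk boundary above. In outline, ringing the doorbell means atomically publishing a new cookie into the doorbell cacheline with a 64-bit compare-exchange, retrying if the GuC moved the cookie underneath us. A sketch of the whole loop under that assumption, using the declarations shown above — illustrative, not the verbatim commit:

	while (attempt--) {
		/* publish (cookie, RING) atomically, since the GuC may be
		 * updating the cacheline concurrently */
		db_ret.value_qw = atomic64_cmpxchg((atomic64_t *)db,
						   db_cmp.value_qw,
						   db_exc.value_qw);
		if (db_ret.value_qw == db_cmp.value_qw) {
			ret = 0;		/* doorbell rung */
			break;
		}

		/* cookie changed under us: refresh expectations and retry */
		db_cmp.cookie = db_ret.cookie;
		db_exc.cookie = db_ret.cookie + 1;
		if (db_exc.cookie == 0)
			db_exc.cookie = 1;	/* a zero cookie means "disabled" */
	}
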
@@ -573,23 +659,10 @@ static bool i915_guc_dequeue(struct intel_engine_cs *engine)
573{ 659{
574 struct execlist_port *port = engine->execlist_port; 660 struct execlist_port *port = engine->execlist_port;
575 struct drm_i915_gem_request *last = port[0].request; 661 struct drm_i915_gem_request *last = port[0].request;
576 unsigned long flags;
577 struct rb_node *rb; 662 struct rb_node *rb;
578 bool submit = false; 663 bool submit = false;
579 664
580 /* After execlist_first is updated, the tasklet will be rescheduled. 665 spin_lock_irq(&engine->timeline->lock);
581 *
582 * If we are currently running (inside the tasklet) and a third
583 * party queues a request and so updates engine->execlist_first under
584 * the spinlock (which we have elided), it will atomically set the
585 * TASKLET_SCHED flag causing the us to be re-executed and pick up
586 * the change in state (the update to TASKLET_SCHED incurs a memory
587 * barrier making this cross-cpu checking safe).
588 */
589 if (!READ_ONCE(engine->execlist_first))
590 return false;
591
592 spin_lock_irqsave(&engine->timeline->lock, flags);
593 rb = engine->execlist_first; 666 rb = engine->execlist_first;
594 while (rb) { 667 while (rb) {
595 struct drm_i915_gem_request *rq = 668 struct drm_i915_gem_request *rq =
@@ -609,8 +682,8 @@ static bool i915_guc_dequeue(struct intel_engine_cs *engine)
609 RB_CLEAR_NODE(&rq->priotree.node); 682 RB_CLEAR_NODE(&rq->priotree.node);
610 rq->priotree.priority = INT_MAX; 683 rq->priotree.priority = INT_MAX;
611 684
612 trace_i915_gem_request_in(rq, port - engine->execlist_port);
613 i915_guc_submit(rq); 685 i915_guc_submit(rq);
686 trace_i915_gem_request_in(rq, port - engine->execlist_port);
614 last = rq; 687 last = rq;
615 submit = true; 688 submit = true;
616 } 689 }
@@ -619,7 +692,7 @@ static bool i915_guc_dequeue(struct intel_engine_cs *engine)
619 nested_enable_signaling(last); 692 nested_enable_signaling(last);
620 engine->execlist_first = rb; 693 engine->execlist_first = rb;
621 } 694 }
622 spin_unlock_irqrestore(&engine->timeline->lock, flags); 695 spin_unlock_irq(&engine->timeline->lock);
623 696
624 return submit; 697 return submit;
625} 698}
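A note on the locking change: i915_guc_dequeue() now runs only from tasklet (softirq) context, where local interrupts are known to be enabled, so the unconditional spin_lock_irq() can replace the costlier save/restore pair. The intel_breadcrumbs.c hunk further down makes the opposite trade for intel_engine_wakeup(), which gains callers with unknown IRQ state. Schematically:

	/* caller context known (IRQs on), e.g. a tasklet */
	spin_lock_irq(&engine->timeline->lock);
	/* ... */
	spin_unlock_irq(&engine->timeline->lock);

	/* caller context unknown: preserve the IRQ state */
	unsigned long flags;
	spin_lock_irqsave(&b->irq_lock, flags);
	/* ... */
	spin_unlock_irqrestore(&b->irq_lock, flags);
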
@@ -695,93 +768,100 @@ err:
695 return vma; 768 return vma;
696} 769}
697 770
698static void 771/* Check that a doorbell register is in the expected state */
699guc_client_free(struct drm_i915_private *dev_priv, 772static bool doorbell_ok(struct intel_guc *guc, u16 db_id)
700 struct i915_guc_client *client)
701{ 773{
702 struct intel_guc *guc = &dev_priv->guc; 774 struct drm_i915_private *dev_priv = guc_to_i915(guc);
703 775 u32 drbregl;
704 if (!client) 776 bool valid;
705 return;
706
707 /*
708 * XXX: wait for any outstanding submissions before freeing memory.
709 * Be sure to drop any locks
710 */
711 777
712 if (client->vaddr) { 778 GEM_BUG_ON(db_id >= GUC_DOORBELL_INVALID);
713 /*
714 * If we got as far as setting up a doorbell, make sure we
715 * shut it down before unmapping & deallocating the memory.
716 */
717 guc_disable_doorbell(guc, client);
718 779
719 i915_gem_object_unpin_map(client->vma->obj); 780 drbregl = I915_READ(GEN8_DRBREGL(db_id));
720 } 781 valid = drbregl & GEN8_DRB_VALID;
721 782
722 i915_vma_unpin_and_release(&client->vma); 783 if (test_bit(db_id, guc->doorbell_bitmap) == valid)
784 return true;
723 785
724 if (client->ctx_index != GUC_INVALID_CTX_ID) { 786 DRM_DEBUG_DRIVER("Doorbell %d has unexpected state (0x%x): valid=%s\n",
725 guc_ctx_desc_fini(guc, client); 787 db_id, drbregl, yesno(valid));
726 ida_simple_remove(&guc->ctx_ids, client->ctx_index);
727 }
728 788
729 kfree(client); 789 return false;
730} 790}
731 791
732/* Check that a doorbell register is in the expected state */ 792/*
733static bool guc_doorbell_check(struct intel_guc *guc, uint16_t db_id) 793 * If the GuC thinks that the doorbell is unassigned (e.g. because we reset and
794 * reloaded the GuC FW) we can use this function to tell the GuC to reassign the
795 * doorbell to the rightful owner.
796 */
797static int __reset_doorbell(struct i915_guc_client* client, u16 db_id)
734{ 798{
735 struct drm_i915_private *dev_priv = guc_to_i915(guc); 799 int err;
736 i915_reg_t drbreg = GEN8_DRBREGL(db_id);
737 uint32_t value = I915_READ(drbreg);
738 bool enabled = (value & GUC_DOORBELL_ENABLED) != 0;
739 bool expected = test_bit(db_id, guc->doorbell_bitmap);
740
741 if (enabled == expected)
742 return true;
743 800
744 DRM_DEBUG_DRIVER("Doorbell %d (reg 0x%x) 0x%x, should be %s\n", 801 __update_doorbell_desc(client, db_id);
745 db_id, drbreg.reg, value, 802 err = __create_doorbell(client);
746 expected ? "active" : "inactive"); 803 if (!err)
804 err = __destroy_doorbell(client);
747 805
748 return false; 806 return err;
749} 807}
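__create_doorbell() and __destroy_doorbell() also fall outside this excerpt. Presumably they wrap host-to-GuC actions; a sketch of the likely shape, assuming the INTEL_GUC_ACTION_* opcodes from intel_guc_fwif.h and the intel_guc_send() transport — the details are illustrative:

	static int __guc_allocate_doorbell(struct intel_guc *guc, u32 stage_id)
	{
		u32 action[] = {
			INTEL_GUC_ACTION_ALLOCATE_DOORBELL,
			stage_id
		};

		return intel_guc_send(guc, action, ARRAY_SIZE(action));
	}

	static int __guc_deallocate_doorbell(struct intel_guc *guc, u32 stage_id)
	{
		u32 action[] = {
			INTEL_GUC_ACTION_DEALLOCATE_DOORBELL,
			stage_id
		};

		return intel_guc_send(guc, action, ARRAY_SIZE(action));
	}
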
750 808
751/* 809/*
752 * Borrow the first client to set up & tear down each unused doorbell 810 * Set up & tear down each unused doorbell in turn, to ensure that all doorbell
753 * in turn, to ensure that all doorbell h/w is (re)initialised. 811 * HW is (re)initialised. To that end, we might have to borrow the first
812 * client. Also, tell GuC about all the doorbells in use by all clients.
813 * We do this because the KMD, the GuC and the doorbell HW can easily go out of
814 * sync (e.g. we can reset the GuC, but not the doorbell HW).
754 */ 815 */
755static void guc_init_doorbell_hw(struct intel_guc *guc) 816static int guc_init_doorbell_hw(struct intel_guc *guc)
756{ 817{
757 struct i915_guc_client *client = guc->execbuf_client; 818 struct i915_guc_client *client = guc->execbuf_client;
758 uint16_t db_id; 819 bool recreate_first_client = false;
759 int i, err; 820 u16 db_id;
760 821 int ret;
761 guc_disable_doorbell(guc, client);
762 822
763 for (i = 0; i < GUC_MAX_DOORBELLS; ++i) { 823 /* For unused doorbells, make sure they are disabled */
764 /* Skip if doorbell is OK */ 824 for_each_clear_bit(db_id, guc->doorbell_bitmap, GUC_NUM_DOORBELLS) {
765 if (guc_doorbell_check(guc, i)) 825 if (doorbell_ok(guc, db_id))
766 continue; 826 continue;
767 827
768 err = guc_update_doorbell_id(guc, client, i); 828 if (has_doorbell(client)) {
769 if (err) 829 /* Borrow execbuf_client (we will recreate it later) */
770 DRM_DEBUG_DRIVER("Doorbell %d update failed, err %d\n", 830 destroy_doorbell(client);
771 i, err); 831 recreate_first_client = true;
832 }
833
834 ret = __reset_doorbell(client, db_id);
835 WARN(ret, "Doorbell %u reset failed, err %d\n", db_id, ret);
772 } 836 }
773 837
774 db_id = select_doorbell_register(guc, client->priority); 838 if (recreate_first_client) {
775 WARN_ON(db_id == GUC_INVALID_DOORBELL_ID); 839 ret = __reserve_doorbell(client);
840 if (unlikely(ret)) {
841 DRM_ERROR("Couldn't re-reserve first client db: %d\n", ret);
842 return ret;
843 }
844
845 __update_doorbell_desc(client, client->doorbell_id);
846 }
776 847
777 err = guc_update_doorbell_id(guc, client, db_id); 848 /* Now for every client (and not only execbuf_client) make sure their
778 if (err) 849 * doorbells are known by the GuC */
779 DRM_WARN("Failed to restore doorbell to %d, err %d\n", 850 //for (client = client_list; client != NULL; client = client->next)
780 db_id, err); 851 {
852 ret = __create_doorbell(client);
853 if (ret) {
854 DRM_ERROR("Couldn't recreate client %u doorbell: %d\n",
855 client->stage_id, ret);
856 return ret;
857 }
858 }
781 859
782 /* Read back & verify all doorbell registers */ 860 /* Read back & verify all (used & unused) doorbell registers */
783 for (i = 0; i < GUC_MAX_DOORBELLS; ++i) 861 for (db_id = 0; db_id < GUC_NUM_DOORBELLS; ++db_id)
784 (void)guc_doorbell_check(guc, i); 862 WARN_ON(!doorbell_ok(guc, db_id));
863
864 return 0;
785} 865}
786 866
787/** 867/**
@@ -807,49 +887,46 @@ guc_client_alloc(struct drm_i915_private *dev_priv,
807 struct intel_guc *guc = &dev_priv->guc; 887 struct intel_guc *guc = &dev_priv->guc;
808 struct i915_vma *vma; 888 struct i915_vma *vma;
809 void *vaddr; 889 void *vaddr;
810 uint16_t db_id; 890 int ret;
811 891
812 client = kzalloc(sizeof(*client), GFP_KERNEL); 892 client = kzalloc(sizeof(*client), GFP_KERNEL);
813 if (!client) 893 if (!client)
814 return NULL; 894 return ERR_PTR(-ENOMEM);
815 895
816 client->owner = ctx;
817 client->guc = guc; 896 client->guc = guc;
897 client->owner = ctx;
818 client->engines = engines; 898 client->engines = engines;
819 client->priority = priority; 899 client->priority = priority;
820 client->doorbell_id = GUC_INVALID_DOORBELL_ID; 900 client->doorbell_id = GUC_DOORBELL_INVALID;
901 client->wq_offset = GUC_DB_SIZE;
902 client->wq_size = GUC_WQ_SIZE;
903 spin_lock_init(&client->wq_lock);
821 904
822 client->ctx_index = (uint32_t)ida_simple_get(&guc->ctx_ids, 0, 905 ret = ida_simple_get(&guc->stage_ids, 0, GUC_MAX_STAGE_DESCRIPTORS,
823 GUC_MAX_GPU_CONTEXTS, GFP_KERNEL); 906 GFP_KERNEL);
824 if (client->ctx_index >= GUC_MAX_GPU_CONTEXTS) { 907 if (ret < 0)
825 client->ctx_index = GUC_INVALID_CTX_ID; 908 goto err_client;
826 goto err; 909
827 } 910 client->stage_id = ret;
828 911
829 /* The first page is doorbell/proc_desc. The two following pages are the wq. */ 912 /* The first page is doorbell/proc_desc. The two following pages are the wq. */
830 vma = intel_guc_allocate_vma(guc, GUC_DB_SIZE + GUC_WQ_SIZE); 913 vma = intel_guc_allocate_vma(guc, GUC_DB_SIZE + GUC_WQ_SIZE);
831 if (IS_ERR(vma)) 914 if (IS_ERR(vma)) {
832 goto err; 915 ret = PTR_ERR(vma);
916 goto err_id;
917 }
833 918
834 /* We'll keep just the first (doorbell/proc) page permanently kmap'd. */ 919 /* We'll keep just the first (doorbell/proc) page permanently kmap'd. */
835 client->vma = vma; 920 client->vma = vma;
836 921
837 vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB); 922 vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
838 if (IS_ERR(vaddr)) 923 if (IS_ERR(vaddr)) {
839 goto err; 924 ret = PTR_ERR(vaddr);
840 925 goto err_vma;
926 }
841 client->vaddr = vaddr; 927 client->vaddr = vaddr;
842 928
843 spin_lock_init(&client->wq_lock); 929 client->doorbell_offset = __select_cacheline(guc);
844 client->wq_offset = GUC_DB_SIZE;
845 client->wq_size = GUC_WQ_SIZE;
846
847 db_id = select_doorbell_register(guc, client->priority);
848 if (db_id == GUC_INVALID_DOORBELL_ID)
849 /* XXX: evict a doorbell instead? */
850 goto err;
851
852 client->doorbell_offset = select_doorbell_cacheline(guc);
853 930
854 /* 931 /*
855 * Since the doorbell only requires a single cacheline, we can save 932 * Since the doorbell only requires a single cacheline, we can save
@@ -862,28 +939,47 @@ guc_client_alloc(struct drm_i915_private *dev_priv,
862 client->proc_desc_offset = (GUC_DB_SIZE / 2); 939 client->proc_desc_offset = (GUC_DB_SIZE / 2);
863 940
864 guc_proc_desc_init(guc, client); 941 guc_proc_desc_init(guc, client);
865 guc_ctx_desc_init(guc, client); 942 guc_stage_desc_init(guc, client);
866 943
867 /* For runtime client allocation we need to enable the doorbell. Not 944 ret = create_doorbell(client);
868 * required yet for the static execbuf_client as this special kernel 945 if (ret)
869 * client is enabled from i915_guc_submission_enable(). 946 goto err_vaddr;
870 *
871 * guc_update_doorbell_id(guc, client, db_id);
872 */
873 947
874 DRM_DEBUG_DRIVER("new priority %u client %p for engine(s) 0x%x: ctx_index %u\n", 948 DRM_DEBUG_DRIVER("new priority %u client %p for engine(s) 0x%x: stage_id %u\n",
875 priority, client, client->engines, client->ctx_index); 949 priority, client, client->engines, client->stage_id);
876 DRM_DEBUG_DRIVER("doorbell id %u, cacheline offset 0x%x\n", 950 DRM_DEBUG_DRIVER("doorbell id %u, cacheline offset 0x%lx\n",
877 client->doorbell_id, client->doorbell_offset); 951 client->doorbell_id, client->doorbell_offset);
878 952
879 return client; 953 return client;
880 954
881err: 955err_vaddr:
882 guc_client_free(dev_priv, client); 956 i915_gem_object_unpin_map(client->vma->obj);
883 return NULL; 957err_vma:
958 i915_vma_unpin_and_release(&client->vma);
959err_id:
960 ida_simple_remove(&guc->stage_ids, client->stage_id);
961err_client:
962 kfree(client);
963 return ERR_PTR(ret);
884} 964}
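Note the interface change: guc_client_alloc() now reports the reason for failure via ERR_PTR() rather than returning NULL, so callers switch from a NULL test to IS_ERR()/PTR_ERR(), as the enable path below does. A minimal caller, with argument names abbreviated for illustration:

	client = guc_client_alloc(dev_priv, engines, priority, ctx);
	if (IS_ERR(client))
		return PTR_ERR(client);	/* propagate the real errno, not a blanket -ENOMEM */
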
885 965
966static void guc_client_free(struct i915_guc_client *client)
967{
968 /*
969 * XXX: wait for any outstanding submissions before freeing memory.
970 * Be sure to drop any locks
971 */
886 972
973 /* FIXME: in many cases, by the time we get here the GuC has been
974 * reset, so we cannot destroy the doorbell properly. Ignore the
975 * error message for now */
976 destroy_doorbell(client);
977 guc_stage_desc_fini(client->guc, client);
978 i915_gem_object_unpin_map(client->vma->obj);
979 i915_vma_unpin_and_release(&client->vma);
980 ida_simple_remove(&client->guc->stage_ids, client->stage_id);
981 kfree(client);
982}
887 983
888static void guc_policies_init(struct guc_policies *policies) 984static void guc_policies_init(struct guc_policies *policies)
889{ 985{
@@ -893,7 +989,7 @@ static void guc_policies_init(struct guc_policies *policies)
893 policies->dpc_promote_time = 500000; 989 policies->dpc_promote_time = 500000;
894 policies->max_num_work_items = POLICY_MAX_NUM_WI; 990 policies->max_num_work_items = POLICY_MAX_NUM_WI;
895 991
896 for (p = 0; p < GUC_CTX_PRIORITY_NUM; p++) { 992 for (p = 0; p < GUC_CLIENT_PRIORITY_NUM; p++) {
897 for (i = GUC_RENDER_ENGINE; i < GUC_MAX_ENGINES_NUM; i++) { 993 for (i = GUC_RENDER_ENGINE; i < GUC_MAX_ENGINES_NUM; i++) {
898 policy = &policies->policy[p][i]; 994 policy = &policies->policy[p][i];
899 995
@@ -907,7 +1003,7 @@ static void guc_policies_init(struct guc_policies *policies)
907 policies->is_valid = 1; 1003 policies->is_valid = 1;
908} 1004}
909 1005
910static void guc_addon_create(struct intel_guc *guc) 1006static int guc_ads_create(struct intel_guc *guc)
911{ 1007{
912 struct drm_i915_private *dev_priv = guc_to_i915(guc); 1008 struct drm_i915_private *dev_priv = guc_to_i915(guc);
913 struct i915_vma *vma; 1009 struct i915_vma *vma;
@@ -923,14 +1019,13 @@ static void guc_addon_create(struct intel_guc *guc)
923 enum intel_engine_id id; 1019 enum intel_engine_id id;
924 u32 base; 1020 u32 base;
925 1021
926 vma = guc->ads_vma; 1022 GEM_BUG_ON(guc->ads_vma);
927 if (!vma) {
928 vma = intel_guc_allocate_vma(guc, PAGE_ALIGN(sizeof(*blob)));
929 if (IS_ERR(vma))
930 return;
931 1023
932 guc->ads_vma = vma; 1024 vma = intel_guc_allocate_vma(guc, PAGE_ALIGN(sizeof(*blob)));
933 } 1025 if (IS_ERR(vma))
1026 return PTR_ERR(vma);
1027
1028 guc->ads_vma = vma;
934 1029
935 page = i915_vma_first_page(vma); 1030 page = i915_vma_first_page(vma);
936 blob = kmap(page); 1031 blob = kmap(page);
@@ -940,11 +1035,11 @@ static void guc_addon_create(struct intel_guc *guc)
940 1035
941 /* MMIO reg state */ 1036 /* MMIO reg state */
942 for_each_engine(engine, dev_priv, id) { 1037 for_each_engine(engine, dev_priv, id) {
943 blob->reg_state.mmio_white_list[engine->guc_id].mmio_start = 1038 blob->reg_state.white_list[engine->guc_id].mmio_start =
944 engine->mmio_base + GUC_MMIO_WHITE_LIST_START; 1039 engine->mmio_base + GUC_MMIO_WHITE_LIST_START;
945 1040
946 /* Nothing to be saved or restored for now. */ 1041 /* Nothing to be saved or restored for now. */
947 blob->reg_state.mmio_white_list[engine->guc_id].count = 0; 1042 blob->reg_state.white_list[engine->guc_id].count = 0;
948 } 1043 }
949 1044
950 /* 1045 /*
@@ -967,67 +1062,75 @@ static void guc_addon_create(struct intel_guc *guc)
967 blob->ads.reg_state_addr = base + ptr_offset(blob, reg_state); 1062 blob->ads.reg_state_addr = base + ptr_offset(blob, reg_state);
968 1063
969 kunmap(page); 1064 kunmap(page);
1065
1066 return 0;
1067}
1068
1069static void guc_ads_destroy(struct intel_guc *guc)
1070{
1071 i915_vma_unpin_and_release(&guc->ads_vma);
970} 1072}
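guc_ads_create() computes GGTT addresses for members of the ADS blob with ptr_offset(). Assuming that helper is the usual offsetof() wrapper (its definition is not part of this excerpt), the address arithmetic reduces to "base of the pinned vma plus the member's offset within the struct":

	/* assumed definition, in the style of i915_utils.h */
	#define ptr_offset(ptr, member) offsetof(typeof(*(ptr)), member)

	base = guc_ggtt_offset(vma);
	blob->ads.reg_state_addr = base + ptr_offset(blob, reg_state);
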
971 1073
972/* 1074/*
973 * Set up the memory resources to be shared with the GuC. At this point, 1075 * Set up the memory resources to be shared with the GuC (via the GGTT)
974 * we require just one object that can be mapped through the GGTT. 1076 * at firmware loading time.
975 */ 1077 */
976int i915_guc_submission_init(struct drm_i915_private *dev_priv) 1078int i915_guc_submission_init(struct drm_i915_private *dev_priv)
977{ 1079{
978 const size_t ctxsize = sizeof(struct guc_context_desc);
979 const size_t poolsize = GUC_MAX_GPU_CONTEXTS * ctxsize;
980 const size_t gemsize = round_up(poolsize, PAGE_SIZE);
981 struct intel_guc *guc = &dev_priv->guc; 1080 struct intel_guc *guc = &dev_priv->guc;
982 struct i915_vma *vma; 1081 struct i915_vma *vma;
1082 void *vaddr;
1083 int ret;
983 1084
984 if (!HAS_GUC_SCHED(dev_priv)) 1085 if (guc->stage_desc_pool)
985 return 0; 1086 return 0;
986 1087
987 /* Wipe bitmap & delete client in case of reinitialisation */ 1088 vma = intel_guc_allocate_vma(guc,
988 bitmap_clear(guc->doorbell_bitmap, 0, GUC_MAX_DOORBELLS); 1089 PAGE_ALIGN(sizeof(struct guc_stage_desc) *
989 i915_guc_submission_disable(dev_priv); 1090 GUC_MAX_STAGE_DESCRIPTORS));
990
991 if (!i915.enable_guc_submission)
992 return 0; /* not enabled */
993
994 if (guc->ctx_pool_vma)
995 return 0; /* already allocated */
996
997 vma = intel_guc_allocate_vma(guc, gemsize);
998 if (IS_ERR(vma)) 1091 if (IS_ERR(vma))
999 return PTR_ERR(vma); 1092 return PTR_ERR(vma);
1000 1093
1001 guc->ctx_pool_vma = vma; 1094 guc->stage_desc_pool = vma;
1002 ida_init(&guc->ctx_ids); 1095
1003 intel_guc_log_create(guc); 1096 vaddr = i915_gem_object_pin_map(guc->stage_desc_pool->obj, I915_MAP_WB);
1004 guc_addon_create(guc); 1097 if (IS_ERR(vaddr)) {
1005 1098 ret = PTR_ERR(vaddr);
1006 guc->execbuf_client = guc_client_alloc(dev_priv, 1099 goto err_vma;
1007 INTEL_INFO(dev_priv)->ring_mask,
1008 GUC_CTX_PRIORITY_KMD_NORMAL,
1009 dev_priv->kernel_context);
1010 if (!guc->execbuf_client) {
1011 DRM_ERROR("Failed to create GuC client for execbuf!\n");
1012 goto err;
1013 } 1100 }
1014 1101
1102 guc->stage_desc_pool_vaddr = vaddr;
1103
1104 ret = intel_guc_log_create(guc);
1105 if (ret < 0)
1106 goto err_vaddr;
1107
1108 ret = guc_ads_create(guc);
1109 if (ret < 0)
1110 goto err_log;
1111
1112 ida_init(&guc->stage_ids);
1113
1015 return 0; 1114 return 0;
1016 1115
1017err: 1116err_log:
1018 i915_guc_submission_fini(dev_priv); 1117 intel_guc_log_destroy(guc);
1019 return -ENOMEM; 1118err_vaddr:
1119 i915_gem_object_unpin_map(guc->stage_desc_pool->obj);
1120err_vma:
1121 i915_vma_unpin_and_release(&guc->stage_desc_pool);
1122 return ret;
1020} 1123}
1021 1124
1022static void guc_reset_wq(struct i915_guc_client *client) 1125void i915_guc_submission_fini(struct drm_i915_private *dev_priv)
1023{ 1126{
1024 struct guc_process_desc *desc = client->vaddr + 1127 struct intel_guc *guc = &dev_priv->guc;
1025 client->proc_desc_offset;
1026
1027 desc->head = 0;
1028 desc->tail = 0;
1029 1128
1030 client->wq_tail = 0; 1129 ida_destroy(&guc->stage_ids);
1130 guc_ads_destroy(guc);
1131 intel_guc_log_destroy(guc);
1132 i915_gem_object_unpin_map(guc->stage_desc_pool->obj);
1133 i915_vma_unpin_and_release(&guc->stage_desc_pool);
1031} 1134}
1032 1135
1033static void guc_interrupts_capture(struct drm_i915_private *dev_priv) 1136static void guc_interrupts_capture(struct drm_i915_private *dev_priv)
@@ -1072,20 +1175,60 @@ static void guc_interrupts_capture(struct drm_i915_private *dev_priv)
1072 dev_priv->rps.pm_intrmsk_mbz &= ~GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC; 1175 dev_priv->rps.pm_intrmsk_mbz &= ~GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
1073} 1176}
1074 1177
1178static void guc_interrupts_release(struct drm_i915_private *dev_priv)
1179{
1180 struct intel_engine_cs *engine;
1181 enum intel_engine_id id;
1182 int irqs;
1183
1184 /*
1185 * tell all command streamers NOT to forward interrupts or vblank
1186 * to GuC.
1187 */
1188 irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_NEVER);
1189 irqs |= _MASKED_BIT_DISABLE(GFX_INTERRUPT_STEERING);
1190 for_each_engine(engine, dev_priv, id)
1191 I915_WRITE(RING_MODE_GEN7(engine), irqs);
1192
1193 /* route all GT interrupts to the host */
1194 I915_WRITE(GUC_BCS_RCS_IER, 0);
1195 I915_WRITE(GUC_VCS2_VCS1_IER, 0);
1196 I915_WRITE(GUC_WD_VECS_IER, 0);
1197
1198 dev_priv->rps.pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
1199 dev_priv->rps.pm_intrmsk_mbz &= ~ARAT_EXPIRED_INTRMSK;
1200}
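RING_MODE_GEN7 is one of the "masked" registers written above: the upper 16 bits of the value select which of the lower 16 bits take effect, so single bits can be flipped without a read-modify-write cycle. A sketch of the helpers, assuming the usual i915_reg.h definitions:

	#define _MASKED_FIELD(mask, value)	(((mask) << 16) | (value))
	#define _MASKED_BIT_ENABLE(a)		({ typeof(a) _a = (a); _MASKED_FIELD(_a, _a); })
	#define _MASKED_BIT_DISABLE(a)		(_MASKED_FIELD((a), 0))

	/* e.g. clear GFX_INTERRUPT_STEERING without disturbing other bits */
	I915_WRITE(RING_MODE_GEN7(engine), _MASKED_BIT_DISABLE(GFX_INTERRUPT_STEERING));
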
1201
1075int i915_guc_submission_enable(struct drm_i915_private *dev_priv) 1202int i915_guc_submission_enable(struct drm_i915_private *dev_priv)
1076{ 1203{
1077 struct intel_guc *guc = &dev_priv->guc; 1204 struct intel_guc *guc = &dev_priv->guc;
1078 struct i915_guc_client *client = guc->execbuf_client; 1205 struct i915_guc_client *client = guc->execbuf_client;
1079 struct intel_engine_cs *engine; 1206 struct intel_engine_cs *engine;
1080 enum intel_engine_id id; 1207 enum intel_engine_id id;
1208 int err;
1209
1210 if (!client) {
1211 client = guc_client_alloc(dev_priv,
1212 INTEL_INFO(dev_priv)->ring_mask,
1213 GUC_CLIENT_PRIORITY_KMD_NORMAL,
1214 dev_priv->kernel_context);
1215 if (IS_ERR(client)) {
1216 DRM_ERROR("Failed to create GuC client for execbuf!\n");
1217 return PTR_ERR(client);
1218 }
1081 1219
1082 if (!client) 1220 guc->execbuf_client = client;
1083 return -ENODEV; 1221 }
1084 1222
1085 intel_guc_sample_forcewake(guc); 1223 err = intel_guc_sample_forcewake(guc);
1224 if (err)
1225 goto err_execbuf_client;
1086 1226
1087 guc_reset_wq(client); 1227 guc_reset_wq(client);
1088 guc_init_doorbell_hw(guc); 1228
1229 err = guc_init_doorbell_hw(guc);
1230 if (err)
1231 goto err_execbuf_client;
1089 1232
1090 /* Take over from manual control of ELSP (execlists) */ 1233 /* Take over from manual control of ELSP (execlists) */
1091 guc_interrupts_capture(dev_priv); 1234 guc_interrupts_capture(dev_priv);
@@ -1112,30 +1255,11 @@ int i915_guc_submission_enable(struct drm_i915_private *dev_priv)
1112 } 1255 }
1113 1256
1114 return 0; 1257 return 0;
1115}
1116 1258
1117static void guc_interrupts_release(struct drm_i915_private *dev_priv) 1259err_execbuf_client:
1118{ 1260 guc_client_free(guc->execbuf_client);
1119 struct intel_engine_cs *engine; 1261 guc->execbuf_client = NULL;
1120 enum intel_engine_id id; 1262 return err;
1121 int irqs;
1122
1123 /*
1124 * tell all command streamers NOT to forward interrupts or vblank
1125 * to GuC.
1126 */
1127 irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_NEVER);
1128 irqs |= _MASKED_BIT_DISABLE(GFX_INTERRUPT_STEERING);
1129 for_each_engine(engine, dev_priv, id)
1130 I915_WRITE(RING_MODE_GEN7(engine), irqs);
1131
1132 /* route all GT interrupts to the host */
1133 I915_WRITE(GUC_BCS_RCS_IER, 0);
1134 I915_WRITE(GUC_VCS2_VCS1_IER, 0);
1135 I915_WRITE(GUC_WD_VECS_IER, 0);
1136
1137 dev_priv->rps.pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
1138 dev_priv->rps.pm_intrmsk_mbz &= ~ARAT_EXPIRED_INTRMSK;
1139} 1263}
1140 1264
1141void i915_guc_submission_disable(struct drm_i915_private *dev_priv) 1265void i915_guc_submission_disable(struct drm_i915_private *dev_priv)
@@ -1144,30 +1268,11 @@ void i915_guc_submission_disable(struct drm_i915_private *dev_priv)
1144 1268
1145 guc_interrupts_release(dev_priv); 1269 guc_interrupts_release(dev_priv);
1146 1270
1147 if (!guc->execbuf_client)
1148 return;
1149
1150 /* Revert back to manual ELSP submission */ 1271 /* Revert back to manual ELSP submission */
1151 intel_engines_reset_default_submission(dev_priv); 1272 intel_engines_reset_default_submission(dev_priv);
1152}
1153
1154void i915_guc_submission_fini(struct drm_i915_private *dev_priv)
1155{
1156 struct intel_guc *guc = &dev_priv->guc;
1157 struct i915_guc_client *client;
1158 1273
1159 client = fetch_and_zero(&guc->execbuf_client); 1274 guc_client_free(guc->execbuf_client);
1160 if (!client) 1275 guc->execbuf_client = NULL;
1161 return;
1162
1163 guc_client_free(dev_priv, client);
1164
1165 i915_vma_unpin_and_release(&guc->ads_vma);
1166 i915_vma_unpin_and_release(&guc->log.vma);
1167
1168 if (guc->ctx_pool_vma)
1169 ida_destroy(&guc->ctx_ids);
1170 i915_vma_unpin_and_release(&guc->ctx_pool_vma);
1171} 1276}
1172 1277
1173/** 1278/**
@@ -1196,7 +1301,6 @@ int intel_guc_suspend(struct drm_i915_private *dev_priv)
1196 return intel_guc_send(guc, data, ARRAY_SIZE(data)); 1301 return intel_guc_send(guc, data, ARRAY_SIZE(data));
1197} 1302}
1198 1303
1199
1200/** 1304/**
1201 * intel_guc_resume() - notify GuC resuming from suspend state 1305 * intel_guc_resume() - notify GuC resuming from suspend state
1202 * @dev_priv: i915 device private 1306 * @dev_priv: i915 device private
@@ -1222,5 +1326,3 @@ int intel_guc_resume(struct drm_i915_private *dev_priv)
1222 1326
1223 return intel_guc_send(guc, data, ARRAY_SIZE(data)); 1327 return intel_guc_send(guc, data, ARRAY_SIZE(data));
1224} 1328}
1225
1226
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 8163d5024ff8..fd97fe00cd0d 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1742,8 +1742,8 @@ static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 gt_iir)
1742 I915_WRITE(SOFT_SCRATCH(15), msg & ~flush); 1742 I915_WRITE(SOFT_SCRATCH(15), msg & ~flush);
1743 1743
1744 /* Handle flush interrupt in bottom half */ 1744 /* Handle flush interrupt in bottom half */
1745 queue_work(dev_priv->guc.log.flush_wq, 1745 queue_work(dev_priv->guc.log.runtime.flush_wq,
1746 &dev_priv->guc.log.flush_work); 1746 &dev_priv->guc.log.runtime.flush_work);
1747 1747
1748 dev_priv->guc.log.flush_interrupt_count++; 1748 dev_priv->guc.log.flush_interrupt_count++;
1749 } else { 1749 } else {
@@ -4252,12 +4252,12 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
4252 dev_priv->rps.pm_intrmsk_mbz = 0; 4252 dev_priv->rps.pm_intrmsk_mbz = 0;
4253 4253
4254 /* 4254 /*
4255 * SNB,IVB can while VLV,CHV may hard hang on looping batchbuffer 4255 * SNB,IVB,HSW can while VLV,CHV may hard hang on looping batchbuffer
4256 * if GEN6_PM_UP_EI_EXPIRED is masked. 4256 * if GEN6_PM_UP_EI_EXPIRED is masked.
4257 * 4257 *
4258 * TODO: verify if this can be reproduced on VLV,CHV. 4258 * TODO: verify if this can be reproduced on VLV,CHV.
4259 */ 4259 */
4260 if (INTEL_INFO(dev_priv)->gen <= 7 && !IS_HASWELL(dev_priv)) 4260 if (INTEL_INFO(dev_priv)->gen <= 7)
4261 dev_priv->rps.pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED; 4261 dev_priv->rps.pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED;
4262 4262
4263 if (INTEL_INFO(dev_priv)->gen >= 8) 4263 if (INTEL_INFO(dev_priv)->gen >= 8)
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index 732101ed57fb..f87b0c4e564d 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -61,6 +61,7 @@
61 .has_overlay = 1, .overlay_needs_physical = 1, \ 61 .has_overlay = 1, .overlay_needs_physical = 1, \
62 .has_gmch_display = 1, \ 62 .has_gmch_display = 1, \
63 .hws_needs_physical = 1, \ 63 .hws_needs_physical = 1, \
64 .unfenced_needs_alignment = 1, \
64 .ring_mask = RENDER_RING, \ 65 .ring_mask = RENDER_RING, \
65 GEN_DEFAULT_PIPEOFFSETS, \ 66 GEN_DEFAULT_PIPEOFFSETS, \
66 CURSOR_OFFSETS 67 CURSOR_OFFSETS
@@ -102,6 +103,7 @@ static const struct intel_device_info intel_i915g_info = {
102 .platform = INTEL_I915G, .cursor_needs_physical = 1, 103 .platform = INTEL_I915G, .cursor_needs_physical = 1,
103 .has_overlay = 1, .overlay_needs_physical = 1, 104 .has_overlay = 1, .overlay_needs_physical = 1,
104 .hws_needs_physical = 1, 105 .hws_needs_physical = 1,
106 .unfenced_needs_alignment = 1,
105}; 107};
106 108
107static const struct intel_device_info intel_i915gm_info = { 109static const struct intel_device_info intel_i915gm_info = {
@@ -113,6 +115,7 @@ static const struct intel_device_info intel_i915gm_info = {
113 .supports_tv = 1, 115 .supports_tv = 1,
114 .has_fbc = 1, 116 .has_fbc = 1,
115 .hws_needs_physical = 1, 117 .hws_needs_physical = 1,
118 .unfenced_needs_alignment = 1,
116}; 119};
117 120
118static const struct intel_device_info intel_i945g_info = { 121static const struct intel_device_info intel_i945g_info = {
@@ -121,6 +124,7 @@ static const struct intel_device_info intel_i945g_info = {
121 .has_hotplug = 1, .cursor_needs_physical = 1, 124 .has_hotplug = 1, .cursor_needs_physical = 1,
122 .has_overlay = 1, .overlay_needs_physical = 1, 125 .has_overlay = 1, .overlay_needs_physical = 1,
123 .hws_needs_physical = 1, 126 .hws_needs_physical = 1,
127 .unfenced_needs_alignment = 1,
124}; 128};
125 129
126static const struct intel_device_info intel_i945gm_info = { 130static const struct intel_device_info intel_i945gm_info = {
@@ -131,6 +135,7 @@ static const struct intel_device_info intel_i945gm_info = {
131 .supports_tv = 1, 135 .supports_tv = 1,
132 .has_fbc = 1, 136 .has_fbc = 1,
133 .hws_needs_physical = 1, 137 .hws_needs_physical = 1,
138 .unfenced_needs_alignment = 1,
134}; 139};
135 140
136static const struct intel_device_info intel_g33_info = { 141static const struct intel_device_info intel_g33_info = {
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 8c121187ff39..060b171480d5 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -1705,7 +1705,7 @@ i915_perf_open_ioctl_locked(struct drm_i915_private *dev_priv,
1705 */ 1705 */
1706 if (WARN_ON(stream->sample_flags != props->sample_flags)) { 1706 if (WARN_ON(stream->sample_flags != props->sample_flags)) {
1707 ret = -ENODEV; 1707 ret = -ENODEV;
1708 goto err_alloc; 1708 goto err_flags;
1709 } 1709 }
1710 1710
1711 list_add(&stream->link, &dev_priv->perf.streams); 1711 list_add(&stream->link, &dev_priv->perf.streams);
@@ -1728,6 +1728,7 @@ i915_perf_open_ioctl_locked(struct drm_i915_private *dev_priv,
1728 1728
1729err_open: 1729err_open:
1730 list_del(&stream->link); 1730 list_del(&stream->link);
1731err_flags:
1731 if (stream->ops->destroy) 1732 if (stream->ops->destroy)
1732 stream->ops->destroy(stream); 1733 stream->ops->destroy(stream);
1733err_alloc: 1734err_alloc:
@@ -1793,6 +1794,11 @@ static int read_properties_unlocked(struct drm_i915_private *dev_priv,
1793 if (ret) 1794 if (ret)
1794 return ret; 1795 return ret;
1795 1796
1797 if (id == 0 || id >= DRM_I915_PERF_PROP_MAX) {
1798 DRM_DEBUG("Unknown i915 perf property ID\n");
1799 return -EINVAL;
1800 }
1801
1796 switch ((enum drm_i915_perf_property_id)id) { 1802 switch ((enum drm_i915_perf_property_id)id) {
1797 case DRM_I915_PERF_PROP_CTX_HANDLE: 1803 case DRM_I915_PERF_PROP_CTX_HANDLE:
1798 props->single_context = 1; 1804 props->single_context = 1;
@@ -1862,9 +1868,8 @@ static int read_properties_unlocked(struct drm_i915_private *dev_priv,
1862 props->oa_periodic = true; 1868 props->oa_periodic = true;
1863 props->oa_period_exponent = value; 1869 props->oa_period_exponent = value;
1864 break; 1870 break;
1865 default: 1871 case DRM_I915_PERF_PROP_MAX:
1866 MISSING_CASE(id); 1872 MISSING_CASE(id);
1867 DRM_DEBUG("Unknown i915 perf property ID\n");
1868 return -EINVAL; 1873 return -EINVAL;
1869 } 1874 }
1870 1875
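The perf change above replaces the default: label with an explicit DRM_I915_PERF_PROP_MAX case and validates the raw id before the switch. The point of dropping default: is that once every enumerator is handled explicitly, GCC's -Wswitch flags any newly added property the switch forgets. A toy illustration of the pattern, with hypothetical names:

	enum demo_prop {
		DEMO_PROP_CTX = 1,
		DEMO_PROP_MAX		/* non-ABI sentinel */
	};

	static int handle_prop(u64 id)
	{
		/* range-check untrusted input before casting into the enum */
		if (id == 0 || id >= DEMO_PROP_MAX)
			return -EINVAL;

		switch ((enum demo_prop)id) {
		case DEMO_PROP_CTX:
			return 0;
		case DEMO_PROP_MAX:	/* unreachable after the check above */
			break;
		}			/* no default: -Wswitch catches new values */

		return -EINVAL;
	}
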
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 04c8f69fcc62..11b12f412492 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -7829,7 +7829,14 @@ enum {
7829#define TRANS_DDI_EDP_INPUT_B_ONOFF (5<<12) 7829#define TRANS_DDI_EDP_INPUT_B_ONOFF (5<<12)
7830#define TRANS_DDI_EDP_INPUT_C_ONOFF (6<<12) 7830#define TRANS_DDI_EDP_INPUT_C_ONOFF (6<<12)
7831#define TRANS_DDI_DP_VC_PAYLOAD_ALLOC (1<<8) 7831#define TRANS_DDI_DP_VC_PAYLOAD_ALLOC (1<<8)
7832#define TRANS_DDI_HDMI_SCRAMBLER_CTS_ENABLE (1<<7)
7833#define TRANS_DDI_HDMI_SCRAMBLER_RESET_FREQ (1<<6)
7832#define TRANS_DDI_BFI_ENABLE (1<<4) 7834#define TRANS_DDI_BFI_ENABLE (1<<4)
7835#define TRANS_DDI_HIGH_TMDS_CHAR_RATE (1<<4)
7836#define TRANS_DDI_HDMI_SCRAMBLING (1<<0)
7837#define TRANS_DDI_HDMI_SCRAMBLING_MASK (TRANS_DDI_HDMI_SCRAMBLER_CTS_ENABLE \
7838 | TRANS_DDI_HDMI_SCRAMBLER_RESET_FREQ \
7839 | TRANS_DDI_HDMI_SCRAMBLING)
7833 7840
7834/* DisplayPort Transport Control */ 7841/* DisplayPort Transport Control */
7835#define _DP_TP_CTL_A 0x64040 7842#define _DP_TP_CTL_A 0x64040
diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h
index 94a3a3299910..c5455d36b617 100644
--- a/drivers/gpu/drm/i915/i915_utils.h
+++ b/drivers/gpu/drm/i915/i915_utils.h
@@ -25,6 +25,24 @@
25#ifndef __I915_UTILS_H 25#ifndef __I915_UTILS_H
26#define __I915_UTILS_H 26#define __I915_UTILS_H
27 27
28#undef WARN_ON
29/* Many gcc versions seem to not see through this and fall over :( */
30#if 0
31#define WARN_ON(x) ({ \
32 bool __i915_warn_cond = (x); \
33 if (__builtin_constant_p(__i915_warn_cond)) \
34 BUILD_BUG_ON(__i915_warn_cond); \
35 WARN(__i915_warn_cond, "WARN_ON(" #x ")"); })
36#else
37#define WARN_ON(x) WARN((x), "%s", "WARN_ON(" __stringify(x) ")")
38#endif
39
40#undef WARN_ON_ONCE
41#define WARN_ON_ONCE(x) WARN_ONCE((x), "%s", "WARN_ON_ONCE(" __stringify(x) ")")
42
43#define MISSING_CASE(x) WARN(1, "Missing switch case (%lu) in %s\n", \
44 (long)(x), __func__)
45
28#if GCC_VERSION >= 70000 46#if GCC_VERSION >= 70000
29#define add_overflows(A, B) \ 47#define add_overflows(A, B) \
30 __builtin_add_overflow_p((A), (B), (typeof((A) + (B)))0) 48 __builtin_add_overflow_p((A), (B), (typeof((A) + (B)))0)
diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c
index ba986edee312..9ccbf26124c6 100644
--- a/drivers/gpu/drm/i915/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c
@@ -47,11 +47,12 @@ static unsigned int __intel_breadcrumbs_wakeup(struct intel_breadcrumbs *b)
47unsigned int intel_engine_wakeup(struct intel_engine_cs *engine) 47unsigned int intel_engine_wakeup(struct intel_engine_cs *engine)
48{ 48{
49 struct intel_breadcrumbs *b = &engine->breadcrumbs; 49 struct intel_breadcrumbs *b = &engine->breadcrumbs;
50 unsigned long flags;
50 unsigned int result; 51 unsigned int result;
51 52
52 spin_lock_irq(&b->irq_lock); 53 spin_lock_irqsave(&b->irq_lock, flags);
53 result = __intel_breadcrumbs_wakeup(b); 54 result = __intel_breadcrumbs_wakeup(b);
54 spin_unlock_irq(&b->irq_lock); 55 spin_unlock_irqrestore(&b->irq_lock, flags);
55 56
56 return result; 57 return result;
57} 58}
@@ -579,6 +580,8 @@ static int intel_breadcrumbs_signaler(void *arg)
579 signaler_set_rtpriority(); 580 signaler_set_rtpriority();
580 581
581 do { 582 do {
583 bool do_schedule = true;
584
582 set_current_state(TASK_INTERRUPTIBLE); 585 set_current_state(TASK_INTERRUPTIBLE);
583 586
584 /* We are either woken up by the interrupt bottom-half, 587 /* We are either woken up by the interrupt bottom-half,
@@ -625,9 +628,23 @@ static int intel_breadcrumbs_signaler(void *arg)
625 spin_unlock_irq(&b->rb_lock); 628 spin_unlock_irq(&b->rb_lock);
626 629
627 i915_gem_request_put(request); 630 i915_gem_request_put(request);
628 } else { 631
632 /* If the engine is saturated we may be continually
633 * processing completed requests. This angers the
634 * NMI watchdog if we never let anything else
635 * have access to the CPU. Let's pretend to be nice
636 * and relinquish the CPU if we burn through the
637 * entire RT timeslice!
638 */
639 do_schedule = need_resched();
640 }
641
642 if (unlikely(do_schedule)) {
629 DEFINE_WAIT(exec); 643 DEFINE_WAIT(exec);
630 644
645 if (kthread_should_park())
646 kthread_parkme();
647
631 if (kthread_should_stop()) { 648 if (kthread_should_stop()) {
632 GEM_BUG_ON(request); 649 GEM_BUG_ON(request);
633 break; 650 break;
@@ -640,9 +657,6 @@ static int intel_breadcrumbs_signaler(void *arg)
640 657
641 if (request) 658 if (request)
642 remove_wait_queue(&request->execute, &exec); 659 remove_wait_queue(&request->execute, &exec);
643
644 if (kthread_should_park())
645 kthread_parkme();
646 } 660 }
647 i915_gem_request_put(request); 661 i915_gem_request_put(request);
648 } while (1); 662 } while (1);
diff --git a/drivers/gpu/drm/i915/intel_cdclk.c b/drivers/gpu/drm/i915/intel_cdclk.c
index c2cc33f3d888..dd3ad52b7dfe 100644
--- a/drivers/gpu/drm/i915/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/intel_cdclk.c
@@ -1442,16 +1442,33 @@ static int bdw_adjust_min_pipe_pixel_rate(struct intel_crtc_state *crtc_state,
1442 if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled) 1442 if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled)
1443 pixel_rate = DIV_ROUND_UP(pixel_rate * 100, 95); 1443 pixel_rate = DIV_ROUND_UP(pixel_rate * 100, 95);
1444 1444
1445 /* BSpec says "Do not use DisplayPort with CDCLK less than 1445 /* BSpec says "Do not use DisplayPort with CDCLK less than 432 MHz,
1446 * 432 MHz, audio enabled, port width x4, and link rate 1446 * audio enabled, port width x4, and link rate HBR2 (5.4 GHz), or else
1447 * HBR2 (5.4 GHz), or else there may be audio corruption or 1447 * there may be audio corruption or screen corruption." This cdclk
1448 * screen corruption." 1448 * restriction for GLK is 316.8 MHz and since GLK can output two
1449 * pixels per clock, the pixel rate becomes 2 * 316.8 MHz.
1449 */ 1450 */
1450 if (intel_crtc_has_dp_encoder(crtc_state) && 1451 if (intel_crtc_has_dp_encoder(crtc_state) &&
1451 crtc_state->has_audio && 1452 crtc_state->has_audio &&
1452 crtc_state->port_clock >= 540000 && 1453 crtc_state->port_clock >= 540000 &&
1453 crtc_state->lane_count == 4) 1454 crtc_state->lane_count == 4) {
1454 pixel_rate = max(432000, pixel_rate); 1455 if (IS_GEMINILAKE(dev_priv))
1456 pixel_rate = max(2 * 316800, pixel_rate);
1457 else
1458 pixel_rate = max(432000, pixel_rate);
1459 }
1460
1461 /* According to BSpec, "The CD clock frequency must be at least twice
1462 * the frequency of the Azalia BCLK." and BCLK is 96 MHz by default.
1463 * The check for GLK has to be adjusted as the platform can output
1464 * two pixels per clock.
1465 */
1466 if (crtc_state->has_audio && INTEL_GEN(dev_priv) >= 9) {
1467 if (IS_GEMINILAKE(dev_priv))
1468 pixel_rate = max(2 * 2 * 96000, pixel_rate);
1469 else
1470 pixel_rate = max(2 * 96000, pixel_rate);
1471 }
1455 1472
1456 return pixel_rate; 1473 return pixel_rate;
1457} 1474}
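In the kHz units pixel_rate uses throughout, the new floors work out as follows (all values taken from the hunk above):

	/*
	 * DP + audio + x4 lanes + HBR2:
	 *   generic:        min cdclk 432000        -> pixel_rate floor 432000
	 *   GLK (2 px/clk): min cdclk 316800        -> floor 2 * 316800 = 633600
	 *
	 * Audio on gen9+ (Azalia BCLK = 96000, cdclk >= 2 * BCLK):
	 *   generic:        min cdclk 2 * 96000     -> floor 192000
	 *   GLK (2 px/clk):                         -> floor 2 * 2 * 96000 = 384000
	 */
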
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 8c82607294c6..2797bf37c3ac 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -669,15 +669,16 @@ static const struct dmi_system_id intel_spurious_crt_detect[] = {
669 { } 669 { }
670}; 670};
671 671
672static enum drm_connector_status 672static int
673intel_crt_detect(struct drm_connector *connector, bool force) 673intel_crt_detect(struct drm_connector *connector,
674 struct drm_modeset_acquire_ctx *ctx,
675 bool force)
674{ 676{
675 struct drm_i915_private *dev_priv = to_i915(connector->dev); 677 struct drm_i915_private *dev_priv = to_i915(connector->dev);
676 struct intel_crt *crt = intel_attached_crt(connector); 678 struct intel_crt *crt = intel_attached_crt(connector);
677 struct intel_encoder *intel_encoder = &crt->base; 679 struct intel_encoder *intel_encoder = &crt->base;
678 enum drm_connector_status status; 680 int status, ret;
679 struct intel_load_detect_pipe tmp; 681 struct intel_load_detect_pipe tmp;
680 struct drm_modeset_acquire_ctx ctx;
681 682
682 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] force=%d\n", 683 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] force=%d\n",
683 connector->base.id, connector->name, 684 connector->base.id, connector->name,
@@ -721,10 +722,9 @@ intel_crt_detect(struct drm_connector *connector, bool force)
721 goto out; 722 goto out;
722 } 723 }
723 724
724 drm_modeset_acquire_init(&ctx, 0);
725
726 /* for pre-945g platforms use load detect */ 725 /* for pre-945g platforms use load detect */
727 if (intel_get_load_detect_pipe(connector, NULL, &tmp, &ctx)) { 726 ret = intel_get_load_detect_pipe(connector, NULL, &tmp, ctx);
727 if (ret > 0) {
728 if (intel_crt_detect_ddc(connector)) 728 if (intel_crt_detect_ddc(connector))
729 status = connector_status_connected; 729 status = connector_status_connected;
730 else if (INTEL_GEN(dev_priv) < 4) 730 else if (INTEL_GEN(dev_priv) < 4)
@@ -734,12 +734,11 @@ intel_crt_detect(struct drm_connector *connector, bool force)
734 status = connector_status_disconnected; 734 status = connector_status_disconnected;
735 else 735 else
736 status = connector_status_unknown; 736 status = connector_status_unknown;
737 intel_release_load_detect_pipe(connector, &tmp, &ctx); 737 intel_release_load_detect_pipe(connector, &tmp, ctx);
738 } else 738 } else if (ret == 0)
739 status = connector_status_unknown; 739 status = connector_status_unknown;
740 740 else if (ret < 0)
741 drm_modeset_drop_locks(&ctx); 741 status = ret;
742 drm_modeset_acquire_fini(&ctx);
743 742
744out: 743out:
745 intel_display_power_put(dev_priv, intel_encoder->power_domain); 744 intel_display_power_put(dev_priv, intel_encoder->power_domain);
@@ -811,7 +810,6 @@ void intel_crt_reset(struct drm_encoder *encoder)
811 810
812static const struct drm_connector_funcs intel_crt_connector_funcs = { 811static const struct drm_connector_funcs intel_crt_connector_funcs = {
813 .dpms = drm_atomic_helper_connector_dpms, 812 .dpms = drm_atomic_helper_connector_dpms,
814 .detect = intel_crt_detect,
815 .fill_modes = drm_helper_probe_single_connector_modes, 813 .fill_modes = drm_helper_probe_single_connector_modes,
816 .late_register = intel_connector_register, 814 .late_register = intel_connector_register,
817 .early_unregister = intel_connector_unregister, 815 .early_unregister = intel_connector_unregister,
@@ -823,6 +821,7 @@ static const struct drm_connector_funcs intel_crt_connector_funcs = {
823}; 821};
824 822
825static const struct drm_connector_helper_funcs intel_crt_connector_helper_funcs = { 823static const struct drm_connector_helper_funcs intel_crt_connector_helper_funcs = {
824 .detect_ctx = intel_crt_detect,
826 .mode_valid = intel_crt_mode_valid, 825 .mode_valid = intel_crt_mode_valid,
827 .get_modes = intel_crt_get_modes, 826 .get_modes = intel_crt_get_modes,
828}; 827};
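This hunk migrates CRT detection to the probe helper's ->detect_ctx hook: the helper now owns the drm_modeset_acquire_ctx and centralises -EDEADLK backoff/retry, instead of each driver open-coding drm_modeset_acquire_init()/fini() around load detection. The callback shape, as used above:

	/* return a connector_status value, or a negative errno
	 * (e.g. -EDEADLK) to make the probe helper back off and retry */
	int (*detect_ctx)(struct drm_connector *connector,
			  struct drm_modeset_acquire_ctx *ctx,
			  bool force);
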
diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c
index 36832257cc9b..1575bde0cf90 100644
--- a/drivers/gpu/drm/i915/intel_csr.c
+++ b/drivers/gpu/drm/i915/intel_csr.c
@@ -49,7 +49,7 @@ MODULE_FIRMWARE(I915_CSR_SKL);
49MODULE_FIRMWARE(I915_CSR_BXT); 49MODULE_FIRMWARE(I915_CSR_BXT);
50#define BXT_CSR_VERSION_REQUIRED CSR_VERSION(1, 7) 50#define BXT_CSR_VERSION_REQUIRED CSR_VERSION(1, 7)
51 51
52#define FIRMWARE_URL "https://01.org/linuxgraphics/intel-linux-graphics-firmwares" 52#define FIRMWARE_URL "https://01.org/linuxgraphics/downloads/firmware"
53 53
54 54
55 55
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index d8214ba8da14..0914ad96a71b 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -539,7 +539,7 @@ intel_ddi_get_buf_trans_fdi(struct drm_i915_private *dev_priv,
539 * values in advance. This function programs the correct values for 539 * values in advance. This function programs the correct values for
540 * DP/eDP/FDI use cases. 540 * DP/eDP/FDI use cases.
541 */ 541 */
542void intel_prepare_dp_ddi_buffers(struct intel_encoder *encoder) 542static void intel_prepare_dp_ddi_buffers(struct intel_encoder *encoder)
543{ 543{
544 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 544 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
545 u32 iboost_bit = 0; 545 u32 iboost_bit = 0;
@@ -806,7 +806,7 @@ void hsw_fdi_link_train(struct intel_crtc *crtc,
806 DP_TP_CTL_ENABLE); 806 DP_TP_CTL_ENABLE);
807} 807}
808 808
809void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder) 809static void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder)
810{ 810{
811 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 811 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
812 struct intel_digital_port *intel_dig_port = 812 struct intel_digital_port *intel_dig_port =
@@ -837,7 +837,8 @@ intel_ddi_get_crtc_encoder(struct intel_crtc *crtc)
837 return ret; 837 return ret;
838} 838}
839 839
840static struct intel_encoder * 840/* Finds the only possible encoder associated with the given CRTC. */
841struct intel_encoder *
841intel_ddi_get_crtc_new_encoder(struct intel_crtc_state *crtc_state) 842intel_ddi_get_crtc_new_encoder(struct intel_crtc_state *crtc_state)
842{ 843{
843 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 844 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
@@ -1127,72 +1128,6 @@ void intel_ddi_clock_get(struct intel_encoder *encoder,
1127 bxt_ddi_clock_get(encoder, pipe_config); 1128 bxt_ddi_clock_get(encoder, pipe_config);
1128} 1129}
1129 1130
1130static bool
1131hsw_ddi_pll_select(struct intel_crtc *intel_crtc,
1132 struct intel_crtc_state *crtc_state,
1133 struct intel_encoder *encoder)
1134{
1135 struct intel_shared_dpll *pll;
1136
1137 pll = intel_get_shared_dpll(intel_crtc, crtc_state,
1138 encoder);
1139 if (!pll)
1140 DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
1141 pipe_name(intel_crtc->pipe));
1142
1143 return pll;
1144}
1145
1146static bool
1147skl_ddi_pll_select(struct intel_crtc *intel_crtc,
1148 struct intel_crtc_state *crtc_state,
1149 struct intel_encoder *encoder)
1150{
1151 struct intel_shared_dpll *pll;
1152
1153 pll = intel_get_shared_dpll(intel_crtc, crtc_state, encoder);
1154 if (pll == NULL) {
1155 DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
1156 pipe_name(intel_crtc->pipe));
1157 return false;
1158 }
1159
1160 return true;
1161}
1162
1163static bool
1164bxt_ddi_pll_select(struct intel_crtc *intel_crtc,
1165 struct intel_crtc_state *crtc_state,
1166 struct intel_encoder *encoder)
1167{
1168 return !!intel_get_shared_dpll(intel_crtc, crtc_state, encoder);
1169}
1170
1171/*
1172 * Tries to find a *shared* PLL for the CRTC and store it in
1173 * intel_crtc->ddi_pll_sel.
1174 *
1175 * For private DPLLs, compute_config() should do the selection for us. This
1176 * function should be folded into compute_config() eventually.
1177 */
1178bool intel_ddi_pll_select(struct intel_crtc *intel_crtc,
1179 struct intel_crtc_state *crtc_state)
1180{
1181 struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
1182 struct intel_encoder *encoder =
1183 intel_ddi_get_crtc_new_encoder(crtc_state);
1184
1185 if (IS_GEN9_BC(dev_priv))
1186 return skl_ddi_pll_select(intel_crtc, crtc_state,
1187 encoder);
1188 else if (IS_GEN9_LP(dev_priv))
1189 return bxt_ddi_pll_select(intel_crtc, crtc_state,
1190 encoder);
1191 else
1192 return hsw_ddi_pll_select(intel_crtc, crtc_state,
1193 encoder);
1194}
1195
1196void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state) 1131void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state)
1197{ 1132{
1198 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 1133 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
@@ -1309,6 +1244,11 @@ void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state)
1309 temp |= TRANS_DDI_MODE_SELECT_HDMI; 1244 temp |= TRANS_DDI_MODE_SELECT_HDMI;
1310 else 1245 else
1311 temp |= TRANS_DDI_MODE_SELECT_DVI; 1246 temp |= TRANS_DDI_MODE_SELECT_DVI;
1247
1248 if (crtc_state->hdmi_scrambling)
1249 temp |= TRANS_DDI_HDMI_SCRAMBLING_MASK;
1250 if (crtc_state->hdmi_high_tmds_clock_ratio)
1251 temp |= TRANS_DDI_HIGH_TMDS_CHAR_RATE;
1312 } else if (type == INTEL_OUTPUT_ANALOG) { 1252 } else if (type == INTEL_OUTPUT_ANALOG) {
1313 temp |= TRANS_DDI_MODE_SELECT_FDI; 1253 temp |= TRANS_DDI_MODE_SELECT_FDI;
1314 temp |= (crtc_state->fdi_lanes - 1) << 1; 1254 temp |= (crtc_state->fdi_lanes - 1) << 1;
@@ -1676,8 +1616,8 @@ uint32_t ddi_signal_levels(struct intel_dp *intel_dp)
1676 return DDI_BUF_TRANS_SELECT(level); 1616 return DDI_BUF_TRANS_SELECT(level);
1677} 1617}
1678 1618
1679void intel_ddi_clk_select(struct intel_encoder *encoder, 1619static void intel_ddi_clk_select(struct intel_encoder *encoder,
1680 struct intel_shared_dpll *pll) 1620 struct intel_shared_dpll *pll)
1681{ 1621{
1682 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 1622 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1683 enum port port = intel_ddi_get_encoder_port(encoder); 1623 enum port port = intel_ddi_get_encoder_port(encoder);
@@ -1881,6 +1821,12 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder,
1881 if (type == INTEL_OUTPUT_HDMI) { 1821 if (type == INTEL_OUTPUT_HDMI) {
1882 struct intel_digital_port *intel_dig_port = 1822 struct intel_digital_port *intel_dig_port =
1883 enc_to_dig_port(encoder); 1823 enc_to_dig_port(encoder);
1824 bool clock_ratio = pipe_config->hdmi_high_tmds_clock_ratio;
1825 bool scrambling = pipe_config->hdmi_scrambling;
1826
1827 intel_hdmi_handle_sink_scrambling(intel_encoder,
1828 conn_state->connector,
1829 clock_ratio, scrambling);
1884 1830
1885 /* In HDMI/DVI mode, the port width, and swing/emphasis values 1831 /* In HDMI/DVI mode, the port width, and swing/emphasis values
1886 * are ignored so nothing special needs to be done besides 1832 * are ignored so nothing special needs to be done besides
@@ -1914,6 +1860,12 @@ static void intel_disable_ddi(struct intel_encoder *intel_encoder,
1914 if (old_crtc_state->has_audio) 1860 if (old_crtc_state->has_audio)
1915 intel_audio_codec_disable(intel_encoder); 1861 intel_audio_codec_disable(intel_encoder);
1916 1862
1863 if (type == INTEL_OUTPUT_HDMI) {
1864 intel_hdmi_handle_sink_scrambling(intel_encoder,
1865 old_conn_state->connector,
1866 false, false);
1867 }
1868
1917 if (type == INTEL_OUTPUT_EDP) { 1869 if (type == INTEL_OUTPUT_EDP) {
1918 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 1870 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1919 1871
@@ -2040,6 +1992,12 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
2040 1992
2041 if (intel_hdmi->infoframe_enabled(&encoder->base, pipe_config)) 1993 if (intel_hdmi->infoframe_enabled(&encoder->base, pipe_config))
2042 pipe_config->has_infoframe = true; 1994 pipe_config->has_infoframe = true;
1995
1996 if ((temp & TRANS_DDI_HDMI_SCRAMBLING_MASK) ==
1997 TRANS_DDI_HDMI_SCRAMBLING_MASK)
1998 pipe_config->hdmi_scrambling = true;
1999 if (temp & TRANS_DDI_HIGH_TMDS_CHAR_RATE)
2000 pipe_config->hdmi_high_tmds_clock_ratio = true;
2043 /* fall through */ 2001 /* fall through */
2044 case TRANS_DDI_MODE_SELECT_DVI: 2002 case TRANS_DDI_MODE_SELECT_DVI:
2045 pipe_config->lane_count = 4; 2003 pipe_config->lane_count = 4;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index e27ea89efd67..3617927af269 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1997,7 +1997,7 @@ intel_tile_width_bytes(const struct drm_framebuffer *fb, int plane)
1997 unsigned int cpp = fb->format->cpp[plane]; 1997 unsigned int cpp = fb->format->cpp[plane];
1998 1998
1999 switch (fb->modifier) { 1999 switch (fb->modifier) {
2000 case DRM_FORMAT_MOD_NONE: 2000 case DRM_FORMAT_MOD_LINEAR:
2001 return cpp; 2001 return cpp;
2002 case I915_FORMAT_MOD_X_TILED: 2002 case I915_FORMAT_MOD_X_TILED:
2003 if (IS_GEN2(dev_priv)) 2003 if (IS_GEN2(dev_priv))
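
This and the following hunks are a mechanical rename: DRM_FORMAT_MOD_NONE becomes DRM_FORMAT_MOD_LINEAR. As far as I can tell from the uapi header of this era, both names evaluate to the same value, so the rename is behavior-neutral and only improves readability (the modifier describes a linear layout, not the absence of one):

/* include/uapi/drm/drm_fourcc.h (abridged, as I read it at the time):
 * both names expand to 0, so the rename changes nothing at runtime. */
#define DRM_FORMAT_MOD_NONE    0
#define DRM_FORMAT_MOD_LINEAR  fourcc_mod_code(NONE, 0) /* vendor NONE is 0 */
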
@@ -2033,7 +2033,7 @@ intel_tile_width_bytes(const struct drm_framebuffer *fb, int plane)
2033static unsigned int 2033static unsigned int
2034intel_tile_height(const struct drm_framebuffer *fb, int plane) 2034intel_tile_height(const struct drm_framebuffer *fb, int plane)
2035{ 2035{
2036 if (fb->modifier == DRM_FORMAT_MOD_NONE) 2036 if (fb->modifier == DRM_FORMAT_MOD_LINEAR)
2037 return 1; 2037 return 1;
2038 else 2038 else
2039 return intel_tile_size(to_i915(fb->dev)) / 2039 return intel_tile_size(to_i915(fb->dev)) /
@@ -2107,7 +2107,7 @@ static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
2107 return 4096; 2107 return 4096;
2108 2108
2109 switch (fb->modifier) { 2109 switch (fb->modifier) {
2110 case DRM_FORMAT_MOD_NONE: 2110 case DRM_FORMAT_MOD_LINEAR:
2111 return intel_linear_alignment(dev_priv); 2111 return intel_linear_alignment(dev_priv);
2112 case I915_FORMAT_MOD_X_TILED: 2112 case I915_FORMAT_MOD_X_TILED:
2113 if (INTEL_GEN(dev_priv) >= 9) 2113 if (INTEL_GEN(dev_priv) >= 9)
@@ -2290,7 +2290,7 @@ static u32 intel_adjust_tile_offset(int *x, int *y,
2290 2290
2291 WARN_ON(new_offset > old_offset); 2291 WARN_ON(new_offset > old_offset);
2292 2292
2293 if (fb->modifier != DRM_FORMAT_MOD_NONE) { 2293 if (fb->modifier != DRM_FORMAT_MOD_LINEAR) {
2294 unsigned int tile_size, tile_width, tile_height; 2294 unsigned int tile_size, tile_width, tile_height;
2295 unsigned int pitch_tiles; 2295 unsigned int pitch_tiles;
2296 2296
@@ -2345,7 +2345,7 @@ static u32 _intel_compute_tile_offset(const struct drm_i915_private *dev_priv,
2345 if (alignment) 2345 if (alignment)
2346 alignment--; 2346 alignment--;
2347 2347
2348 if (fb_modifier != DRM_FORMAT_MOD_NONE) { 2348 if (fb_modifier != DRM_FORMAT_MOD_LINEAR) {
2349 unsigned int tile_size, tile_width, tile_height; 2349 unsigned int tile_size, tile_width, tile_height;
2350 unsigned int tile_rows, tiles, pitch_tiles; 2350 unsigned int tile_rows, tiles, pitch_tiles;
2351 2351
@@ -2471,7 +2471,7 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
2471 DRM_ROTATE_0, tile_size); 2471 DRM_ROTATE_0, tile_size);
2472 offset /= tile_size; 2472 offset /= tile_size;
2473 2473
2474 if (fb->modifier != DRM_FORMAT_MOD_NONE) { 2474 if (fb->modifier != DRM_FORMAT_MOD_LINEAR) {
2475 unsigned int tile_width, tile_height; 2475 unsigned int tile_width, tile_height;
2476 unsigned int pitch_tiles; 2476 unsigned int pitch_tiles;
2477 struct drm_rect r; 2477 struct drm_rect r;
@@ -2803,7 +2803,7 @@ static int skl_max_plane_width(const struct drm_framebuffer *fb, int plane,
2803 int cpp = fb->format->cpp[plane]; 2803 int cpp = fb->format->cpp[plane];
2804 2804
2805 switch (fb->modifier) { 2805 switch (fb->modifier) {
2806 case DRM_FORMAT_MOD_NONE: 2806 case DRM_FORMAT_MOD_LINEAR:
2807 case I915_FORMAT_MOD_X_TILED: 2807 case I915_FORMAT_MOD_X_TILED:
2808 switch (cpp) { 2808 switch (cpp) {
2809 case 8: 2809 case 8:
@@ -2962,28 +2962,27 @@ int skl_check_plane_surface(struct intel_plane_state *plane_state)
2962 return 0; 2962 return 0;
2963} 2963}
2964 2964
2965static void i9xx_update_primary_plane(struct drm_plane *primary, 2965static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state,
2966 const struct intel_crtc_state *crtc_state, 2966 const struct intel_plane_state *plane_state)
2967 const struct intel_plane_state *plane_state)
2968{ 2967{
2969 struct drm_i915_private *dev_priv = to_i915(primary->dev); 2968 struct drm_i915_private *dev_priv =
2970 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); 2969 to_i915(plane_state->base.plane->dev);
2971 struct drm_framebuffer *fb = plane_state->base.fb; 2970 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
2972 int plane = intel_crtc->plane; 2971 const struct drm_framebuffer *fb = plane_state->base.fb;
2973 u32 linear_offset;
2974 u32 dspcntr;
2975 i915_reg_t reg = DSPCNTR(plane);
2976 unsigned int rotation = plane_state->base.rotation; 2972 unsigned int rotation = plane_state->base.rotation;
2977 int x = plane_state->base.src.x1 >> 16; 2973 u32 dspcntr;
2978 int y = plane_state->base.src.y1 >> 16;
2979 unsigned long irqflags;
2980 2974
2981 dspcntr = DISPPLANE_GAMMA_ENABLE; 2975 dspcntr = DISPLAY_PLANE_ENABLE | DISPPLANE_GAMMA_ENABLE;
2982 2976
2983 dspcntr |= DISPLAY_PLANE_ENABLE; 2977 if (IS_G4X(dev_priv) || IS_GEN5(dev_priv) ||
2978 IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv))
2979 dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
2980
2981 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
2982 dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;
2984 2983
2985 if (INTEL_GEN(dev_priv) < 4) { 2984 if (INTEL_GEN(dev_priv) < 4) {
2986 if (intel_crtc->pipe == PIPE_B) 2985 if (crtc->pipe == PIPE_B)
2987 dspcntr |= DISPPLANE_SEL_PIPE_B; 2986 dspcntr |= DISPPLANE_SEL_PIPE_B;
2988 } 2987 }
2989 2988
@@ -3010,7 +3009,8 @@ static void i9xx_update_primary_plane(struct drm_plane *primary,
3010 dspcntr |= DISPPLANE_RGBX101010; 3009 dspcntr |= DISPPLANE_RGBX101010;
3011 break; 3010 break;
3012 default: 3011 default:
3013 BUG(); 3012 MISSING_CASE(fb->format->format);
3013 return 0;
3014 } 3014 }
3015 3015
3016 if (INTEL_GEN(dev_priv) >= 4 && 3016 if (INTEL_GEN(dev_priv) >= 4 &&
@@ -3023,25 +3023,66 @@ static void i9xx_update_primary_plane(struct drm_plane *primary,
3023 if (rotation & DRM_REFLECT_X) 3023 if (rotation & DRM_REFLECT_X)
3024 dspcntr |= DISPPLANE_MIRROR; 3024 dspcntr |= DISPPLANE_MIRROR;
3025 3025
3026 if (IS_G4X(dev_priv)) 3026 return dspcntr;
3027 dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE; 3027}
3028 3028
3029 intel_add_fb_offsets(&x, &y, plane_state, 0); 3029int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
3030{
3031 struct drm_i915_private *dev_priv =
3032 to_i915(plane_state->base.plane->dev);
3033 int src_x = plane_state->base.src.x1 >> 16;
3034 int src_y = plane_state->base.src.y1 >> 16;
3035 u32 offset;
3036
3037 intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
3030 3038
3031 if (INTEL_GEN(dev_priv) >= 4) 3039 if (INTEL_GEN(dev_priv) >= 4)
3032 intel_crtc->dspaddr_offset = 3040 offset = intel_compute_tile_offset(&src_x, &src_y,
3033 intel_compute_tile_offset(&x, &y, plane_state, 0); 3041 plane_state, 0);
3042 else
3043 offset = 0;
3034 3044
3035 if (rotation & DRM_ROTATE_180) { 3045 /* HSW/BDW do this automagically in hardware */
3036 x += crtc_state->pipe_src_w - 1; 3046 if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)) {
3037 y += crtc_state->pipe_src_h - 1; 3047 unsigned int rotation = plane_state->base.rotation;
3038 } else if (rotation & DRM_REFLECT_X) { 3048 int src_w = drm_rect_width(&plane_state->base.src) >> 16;
3039 x += crtc_state->pipe_src_w - 1; 3049 int src_h = drm_rect_height(&plane_state->base.src) >> 16;
3050
3051 if (rotation & DRM_ROTATE_180) {
3052 src_x += src_w - 1;
3053 src_y += src_h - 1;
3054 } else if (rotation & DRM_REFLECT_X) {
3055 src_x += src_w - 1;
3056 }
3040 } 3057 }
3041 3058
3059 plane_state->main.offset = offset;
3060 plane_state->main.x = src_x;
3061 plane_state->main.y = src_y;
3062
3063 return 0;
3064}
3065
3066static void i9xx_update_primary_plane(struct drm_plane *primary,
3067 const struct intel_crtc_state *crtc_state,
3068 const struct intel_plane_state *plane_state)
3069{
3070 struct drm_i915_private *dev_priv = to_i915(primary->dev);
3071 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
3072 struct drm_framebuffer *fb = plane_state->base.fb;
3073 int plane = intel_crtc->plane;
3074 u32 linear_offset;
3075 u32 dspcntr = plane_state->ctl;
3076 i915_reg_t reg = DSPCNTR(plane);
3077 int x = plane_state->main.x;
3078 int y = plane_state->main.y;
3079 unsigned long irqflags;
3080
3042 linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0); 3081 linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
3043 3082
3044 if (INTEL_GEN(dev_priv) < 4) 3083 if (INTEL_GEN(dev_priv) >= 4)
3084 intel_crtc->dspaddr_offset = plane_state->main.offset;
3085 else
3045 intel_crtc->dspaddr_offset = linear_offset; 3086 intel_crtc->dspaddr_offset = linear_offset;
3046 3087
3047 intel_crtc->adjusted_x = x; 3088 intel_crtc->adjusted_x = x;
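
The new i9xx_check_plane_surface() mirrors what skl_check_plane_surface() already does: resolve the source position once during atomic check and stash it in plane_state->main, so the commit-side hook only latches precomputed values. The 180-degree fixup exists because the hardware then scans the buffer backwards from the programmed offset, so the offset must point at the opposite corner. A standalone sketch of that fixup, using plain ints instead of the driver's 16.16 fixed-point rectangles:

/* Hedged sketch of the rotation fixup above; src coordinates are plain
 * ints here, not the driver's fixed-point rects. */
static void adjust_src_for_rotation(int *x, int *y, int w, int h,
                                    int rotate_180, int reflect_x)
{
        if (rotate_180) {
                /* scanout starts at the far corner and walks backwards */
                *x += w - 1;
                *y += h - 1;
        } else if (reflect_x) {
                *x += w - 1;
        }
}
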
@@ -3068,7 +3109,12 @@ static void i9xx_update_primary_plane(struct drm_plane *primary,
3068 I915_WRITE_FW(reg, dspcntr); 3109 I915_WRITE_FW(reg, dspcntr);
3069 3110
3070 I915_WRITE_FW(DSPSTRIDE(plane), fb->pitches[0]); 3111 I915_WRITE_FW(DSPSTRIDE(plane), fb->pitches[0]);
3071 if (INTEL_GEN(dev_priv) >= 4) { 3112 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
3113 I915_WRITE_FW(DSPSURF(plane),
3114 intel_plane_ggtt_offset(plane_state) +
3115 intel_crtc->dspaddr_offset);
3116 I915_WRITE_FW(DSPOFFSET(plane), (y << 16) | x);
3117 } else if (INTEL_GEN(dev_priv) >= 4) {
3072 I915_WRITE_FW(DSPSURF(plane), 3118 I915_WRITE_FW(DSPSURF(plane),
3073 intel_plane_ggtt_offset(plane_state) + 3119 intel_plane_ggtt_offset(plane_state) +
3074 intel_crtc->dspaddr_offset); 3120 intel_crtc->dspaddr_offset);
@@ -3105,101 +3151,10 @@ static void i9xx_disable_primary_plane(struct drm_plane *primary,
3105 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 3151 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
3106} 3152}
3107 3153
3108static void ironlake_update_primary_plane(struct drm_plane *primary,
3109 const struct intel_crtc_state *crtc_state,
3110 const struct intel_plane_state *plane_state)
3111{
3112 struct drm_device *dev = primary->dev;
3113 struct drm_i915_private *dev_priv = to_i915(dev);
3114 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
3115 struct drm_framebuffer *fb = plane_state->base.fb;
3116 int plane = intel_crtc->plane;
3117 u32 linear_offset;
3118 u32 dspcntr;
3119 i915_reg_t reg = DSPCNTR(plane);
3120 unsigned int rotation = plane_state->base.rotation;
3121 int x = plane_state->base.src.x1 >> 16;
3122 int y = plane_state->base.src.y1 >> 16;
3123 unsigned long irqflags;
3124
3125 dspcntr = DISPPLANE_GAMMA_ENABLE;
3126 dspcntr |= DISPLAY_PLANE_ENABLE;
3127
3128 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
3129 dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;
3130
3131 switch (fb->format->format) {
3132 case DRM_FORMAT_C8:
3133 dspcntr |= DISPPLANE_8BPP;
3134 break;
3135 case DRM_FORMAT_RGB565:
3136 dspcntr |= DISPPLANE_BGRX565;
3137 break;
3138 case DRM_FORMAT_XRGB8888:
3139 dspcntr |= DISPPLANE_BGRX888;
3140 break;
3141 case DRM_FORMAT_XBGR8888:
3142 dspcntr |= DISPPLANE_RGBX888;
3143 break;
3144 case DRM_FORMAT_XRGB2101010:
3145 dspcntr |= DISPPLANE_BGRX101010;
3146 break;
3147 case DRM_FORMAT_XBGR2101010:
3148 dspcntr |= DISPPLANE_RGBX101010;
3149 break;
3150 default:
3151 BUG();
3152 }
3153
3154 if (fb->modifier == I915_FORMAT_MOD_X_TILED)
3155 dspcntr |= DISPPLANE_TILED;
3156
3157 if (rotation & DRM_ROTATE_180)
3158 dspcntr |= DISPPLANE_ROTATE_180;
3159
3160 if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv))
3161 dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
3162
3163 intel_add_fb_offsets(&x, &y, plane_state, 0);
3164
3165 intel_crtc->dspaddr_offset =
3166 intel_compute_tile_offset(&x, &y, plane_state, 0);
3167
3168 /* HSW+ does this automagically in hardware */
3169 if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv) &&
3170 rotation & DRM_ROTATE_180) {
3171 x += crtc_state->pipe_src_w - 1;
3172 y += crtc_state->pipe_src_h - 1;
3173 }
3174
3175 linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
3176
3177 intel_crtc->adjusted_x = x;
3178 intel_crtc->adjusted_y = y;
3179
3180 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
3181
3182 I915_WRITE_FW(reg, dspcntr);
3183
3184 I915_WRITE_FW(DSPSTRIDE(plane), fb->pitches[0]);
3185 I915_WRITE_FW(DSPSURF(plane),
3186 intel_plane_ggtt_offset(plane_state) +
3187 intel_crtc->dspaddr_offset);
3188 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
3189 I915_WRITE_FW(DSPOFFSET(plane), (y << 16) | x);
3190 } else {
3191 I915_WRITE_FW(DSPTILEOFF(plane), (y << 16) | x);
3192 I915_WRITE_FW(DSPLINOFF(plane), linear_offset);
3193 }
3194 POSTING_READ_FW(reg);
3195
3196 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
3197}
3198
3199static u32 3154static u32
3200intel_fb_stride_alignment(const struct drm_framebuffer *fb, int plane) 3155intel_fb_stride_alignment(const struct drm_framebuffer *fb, int plane)
3201{ 3156{
3202 if (fb->modifier == DRM_FORMAT_MOD_NONE) 3157 if (fb->modifier == DRM_FORMAT_MOD_LINEAR)
3203 return 64; 3158 return 64;
3204 else 3159 else
3205 return intel_tile_width_bytes(fb, plane); 3160 return intel_tile_width_bytes(fb, plane);
@@ -3254,7 +3209,7 @@ u32 skl_plane_stride(const struct drm_framebuffer *fb, int plane,
3254 return stride; 3209 return stride;
3255} 3210}
3256 3211
3257u32 skl_plane_ctl_format(uint32_t pixel_format) 3212static u32 skl_plane_ctl_format(uint32_t pixel_format)
3258{ 3213{
3259 switch (pixel_format) { 3214 switch (pixel_format) {
3260 case DRM_FORMAT_C8: 3215 case DRM_FORMAT_C8:
@@ -3295,10 +3250,10 @@ u32 skl_plane_ctl_format(uint32_t pixel_format)
3295 return 0; 3250 return 0;
3296} 3251}
3297 3252
3298u32 skl_plane_ctl_tiling(uint64_t fb_modifier) 3253static u32 skl_plane_ctl_tiling(uint64_t fb_modifier)
3299{ 3254{
3300 switch (fb_modifier) { 3255 switch (fb_modifier) {
3301 case DRM_FORMAT_MOD_NONE: 3256 case DRM_FORMAT_MOD_LINEAR:
3302 break; 3257 break;
3303 case I915_FORMAT_MOD_X_TILED: 3258 case I915_FORMAT_MOD_X_TILED:
3304 return PLANE_CTL_TILED_X; 3259 return PLANE_CTL_TILED_X;
@@ -3313,7 +3268,7 @@ u32 skl_plane_ctl_tiling(uint64_t fb_modifier)
3313 return 0; 3268 return 0;
3314} 3269}
3315 3270
3316u32 skl_plane_ctl_rotation(unsigned int rotation) 3271static u32 skl_plane_ctl_rotation(unsigned int rotation)
3317{ 3272{
3318 switch (rotation) { 3273 switch (rotation) {
3319 case DRM_ROTATE_0: 3274 case DRM_ROTATE_0:
@@ -3335,6 +3290,37 @@ u32 skl_plane_ctl_rotation(unsigned int rotation)
3335 return 0; 3290 return 0;
3336} 3291}
3337 3292
3293u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
3294 const struct intel_plane_state *plane_state)
3295{
3296 struct drm_i915_private *dev_priv =
3297 to_i915(plane_state->base.plane->dev);
3298 const struct drm_framebuffer *fb = plane_state->base.fb;
3299 unsigned int rotation = plane_state->base.rotation;
3300 const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
3301 u32 plane_ctl;
3302
3303 plane_ctl = PLANE_CTL_ENABLE;
3304
3305 if (!IS_GEMINILAKE(dev_priv)) {
3306 plane_ctl |=
3307 PLANE_CTL_PIPE_GAMMA_ENABLE |
3308 PLANE_CTL_PIPE_CSC_ENABLE |
3309 PLANE_CTL_PLANE_GAMMA_DISABLE;
3310 }
3311
3312 plane_ctl |= skl_plane_ctl_format(fb->format->format);
3313 plane_ctl |= skl_plane_ctl_tiling(fb->modifier);
3314 plane_ctl |= skl_plane_ctl_rotation(rotation);
3315
3316 if (key->flags & I915_SET_COLORKEY_DESTINATION)
3317 plane_ctl |= PLANE_CTL_KEY_ENABLE_DESTINATION;
3318 else if (key->flags & I915_SET_COLORKEY_SOURCE)
3319 plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE;
3320
3321 return plane_ctl;
3322}
3323
3338static void skylake_update_primary_plane(struct drm_plane *plane, 3324static void skylake_update_primary_plane(struct drm_plane *plane,
3339 const struct intel_crtc_state *crtc_state, 3325 const struct intel_crtc_state *crtc_state,
3340 const struct intel_plane_state *plane_state) 3326 const struct intel_plane_state *plane_state)
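
skl_plane_ctl() folds the previously exported format/tiling/rotation helpers into one function that runs during atomic check, leaving the commit hook to write a precomputed plane_state->ctl. The general pattern, sketched with invented names and an invented bit layout:

/* Check/commit split, sketched with made-up names and register layout. */
struct example_plane_state { u32 fmt_bits, tile_bits, rot_bits, ctl; };

static int example_plane_check(struct example_plane_state *s)
{
        /* may fail, touches no hardware */
        s->ctl = s->fmt_bits | s->tile_bits | s->rot_bits;
        return 0;
}

static void example_plane_commit(const struct example_plane_state *s,
                                 volatile u32 *plane_ctl_reg)
{
        /* must not fail: just latch what the check phase computed */
        *plane_ctl_reg = s->ctl;
}
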
@@ -3345,7 +3331,7 @@ static void skylake_update_primary_plane(struct drm_plane *plane,
3345 struct drm_framebuffer *fb = plane_state->base.fb; 3331 struct drm_framebuffer *fb = plane_state->base.fb;
3346 enum plane_id plane_id = to_intel_plane(plane)->id; 3332 enum plane_id plane_id = to_intel_plane(plane)->id;
3347 enum pipe pipe = to_intel_plane(plane)->pipe; 3333 enum pipe pipe = to_intel_plane(plane)->pipe;
3348 u32 plane_ctl; 3334 u32 plane_ctl = plane_state->ctl;
3349 unsigned int rotation = plane_state->base.rotation; 3335 unsigned int rotation = plane_state->base.rotation;
3350 u32 stride = skl_plane_stride(fb, 0, rotation); 3336 u32 stride = skl_plane_stride(fb, 0, rotation);
3351 u32 surf_addr = plane_state->main.offset; 3337 u32 surf_addr = plane_state->main.offset;
@@ -3360,19 +3346,6 @@ static void skylake_update_primary_plane(struct drm_plane *plane,
3360 int dst_h = drm_rect_height(&plane_state->base.dst); 3346 int dst_h = drm_rect_height(&plane_state->base.dst);
3361 unsigned long irqflags; 3347 unsigned long irqflags;
3362 3348
3363 plane_ctl = PLANE_CTL_ENABLE;
3364
3365 if (!IS_GEMINILAKE(dev_priv)) {
3366 plane_ctl |=
3367 PLANE_CTL_PIPE_GAMMA_ENABLE |
3368 PLANE_CTL_PIPE_CSC_ENABLE |
3369 PLANE_CTL_PLANE_GAMMA_DISABLE;
3370 }
3371
3372 plane_ctl |= skl_plane_ctl_format(fb->format->format);
3373 plane_ctl |= skl_plane_ctl_tiling(fb->modifier);
3374 plane_ctl |= skl_plane_ctl_rotation(rotation);
3375
3376 /* Sizes are 0 based */ 3349 /* Sizes are 0 based */
3377 src_w--; 3350 src_w--;
3378 src_h--; 3351 src_h--;
@@ -3439,17 +3412,6 @@ static void skylake_disable_primary_plane(struct drm_plane *primary,
3439 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 3412 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
3440} 3413}
3441 3414
3442/* Assume fb object is pinned & idle & fenced and just update base pointers */
3443static int
3444intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
3445 int x, int y, enum mode_set_atomic state)
3446{
3447 /* Support for kgdboc is disabled, this needs a major rework. */
3448 DRM_ERROR("legacy panic handler not supported any more.\n");
3449
3450 return -ENODEV;
3451}
3452
3453static void intel_complete_page_flips(struct drm_i915_private *dev_priv) 3415static void intel_complete_page_flips(struct drm_i915_private *dev_priv)
3454{ 3416{
3455 struct intel_crtc *crtc; 3417 struct intel_crtc *crtc;
@@ -6317,6 +6279,17 @@ intel_reduce_m_n_ratio(uint32_t *num, uint32_t *den)
6317static void compute_m_n(unsigned int m, unsigned int n, 6279static void compute_m_n(unsigned int m, unsigned int n,
6318 uint32_t *ret_m, uint32_t *ret_n) 6280 uint32_t *ret_m, uint32_t *ret_n)
6319{ 6281{
6282 /*
6283 * Reduce M/N as much as possible without loss in precision. Several DP
6284 * dongles in particular seem to be fussy about too large *link* M/N
6285 * values. The passed in values are more likely to have the least
6286 * significant bits zero than M after rounding below, so do this first.
6287 */
6288 while ((m & 1) == 0 && (n & 1) == 0) {
6289 m >>= 1;
6290 n >>= 1;
6291 }
6292
6320 *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX); 6293 *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
6321 *ret_m = div_u64((uint64_t) m * *ret_n, n); 6294 *ret_m = div_u64((uint64_t) m * *ret_n, n);
6322 intel_reduce_m_n_ratio(ret_m, ret_n); 6295 intel_reduce_m_n_ratio(ret_m, ret_n);
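
The loop added above strips common factors of two from m and n before n is rounded up to a power of two. That keeps the ratio exact while shrinking the link M/N values that some DP dongles reject. A standalone illustration:

/* Standalone illustration of the even-factor reduction added above. */
static void reduce_even(unsigned int *m, unsigned int *n)
{
        while ((*m & 1) == 0 && (*n & 1) == 0) {
                *m >>= 1;
                *n >>= 1;
        }
}
/* e.g. m = 193280, n = 448000 reduce to m = 1510, n = 3500:
 * the ratio is unchanged, but both values are 128x smaller. */
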
@@ -8406,7 +8379,7 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
8406 tiling = val & PLANE_CTL_TILED_MASK; 8379 tiling = val & PLANE_CTL_TILED_MASK;
8407 switch (tiling) { 8380 switch (tiling) {
8408 case PLANE_CTL_TILED_LINEAR: 8381 case PLANE_CTL_TILED_LINEAR:
8409 fb->modifier = DRM_FORMAT_MOD_NONE; 8382 fb->modifier = DRM_FORMAT_MOD_LINEAR;
8410 break; 8383 break;
8411 case PLANE_CTL_TILED_X: 8384 case PLANE_CTL_TILED_X:
8412 plane_config->tiling = I915_TILING_X; 8385 plane_config->tiling = I915_TILING_X;
@@ -8862,8 +8835,14 @@ static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
8862 struct intel_crtc_state *crtc_state) 8835 struct intel_crtc_state *crtc_state)
8863{ 8836{
8864 if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI)) { 8837 if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI)) {
8865 if (!intel_ddi_pll_select(crtc, crtc_state)) 8838 struct intel_encoder *encoder =
8839 intel_ddi_get_crtc_new_encoder(crtc_state);
8840
8841 if (!intel_get_shared_dpll(crtc, crtc_state, encoder)) {
8842 DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
8843 pipe_name(crtc->pipe));
8866 return -EINVAL; 8844 return -EINVAL;
8845 }
8867 } 8846 }
8868 8847
8869 crtc->lowfreq_avail = false; 8848 crtc->lowfreq_avail = false;
@@ -9159,6 +9138,31 @@ out:
9159 return active; 9138 return active;
9160} 9139}
9161 9140
9141static u32 i845_cursor_ctl(const struct intel_crtc_state *crtc_state,
9142 const struct intel_plane_state *plane_state)
9143{
9144 unsigned int width = plane_state->base.crtc_w;
9145 unsigned int stride = roundup_pow_of_two(width) * 4;
9146
9147 switch (stride) {
9148 default:
9149 WARN_ONCE(1, "Invalid cursor width/stride, width=%u, stride=%u\n",
9150 width, stride);
9151 stride = 256;
9152 /* fallthrough */
9153 case 256:
9154 case 512:
9155 case 1024:
9156 case 2048:
9157 break;
9158 }
9159
9160 return CURSOR_ENABLE |
9161 CURSOR_GAMMA_ENABLE |
9162 CURSOR_FORMAT_ARGB |
9163 CURSOR_STRIDE(stride);
9164}
9165
9162static void i845_update_cursor(struct drm_crtc *crtc, u32 base, 9166static void i845_update_cursor(struct drm_crtc *crtc, u32 base,
9163 const struct intel_plane_state *plane_state) 9167 const struct intel_plane_state *plane_state)
9164{ 9168{
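
i845_cursor_ctl() moves the stride validation into the check phase as well. On i845/i865 the cursor stride is roundup_pow_of_two(width) * 4 (ARGB, four bytes per pixel), and only 256/512/1024/2048 are accepted, which bounds the width at 512 pixels. A sketch with roundup_pow_of_two() open-coded so it stands alone:

/* Sketch of the i845/i865 stride rule enforced above. */
static unsigned int pow2_roundup(unsigned int v)
{
        unsigned int p = 1;

        while (p < v)
                p <<= 1;
        return p;
}

/* width 64 -> 256, width 200 -> 1024, width 512 -> 2048; anything
 * wider would need a stride above 2048 and trips the WARN path, as
 * would widths of 32 or less (stride under 256). */
static unsigned int i845_cursor_stride(unsigned int width)
{
        return pow2_roundup(width) * 4;
}
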
@@ -9170,26 +9174,8 @@ static void i845_update_cursor(struct drm_crtc *crtc, u32 base,
9170 if (plane_state && plane_state->base.visible) { 9174 if (plane_state && plane_state->base.visible) {
9171 unsigned int width = plane_state->base.crtc_w; 9175 unsigned int width = plane_state->base.crtc_w;
9172 unsigned int height = plane_state->base.crtc_h; 9176 unsigned int height = plane_state->base.crtc_h;
9173 unsigned int stride = roundup_pow_of_two(width) * 4;
9174
9175 switch (stride) {
9176 default:
9177 WARN_ONCE(1, "Invalid cursor width/stride, width=%u, stride=%u\n",
9178 width, stride);
9179 stride = 256;
9180 /* fallthrough */
9181 case 256:
9182 case 512:
9183 case 1024:
9184 case 2048:
9185 break;
9186 }
9187
9188 cntl |= CURSOR_ENABLE |
9189 CURSOR_GAMMA_ENABLE |
9190 CURSOR_FORMAT_ARGB |
9191 CURSOR_STRIDE(stride);
9192 9177
9178 cntl = plane_state->ctl;
9193 size = (height << 12) | width; 9179 size = (height << 12) | width;
9194 } 9180 }
9195 9181
@@ -9222,6 +9208,43 @@ static void i845_update_cursor(struct drm_crtc *crtc, u32 base,
9222 } 9208 }
9223} 9209}
9224 9210
9211static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
9212 const struct intel_plane_state *plane_state)
9213{
9214 struct drm_i915_private *dev_priv =
9215 to_i915(plane_state->base.plane->dev);
9216 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
9217 enum pipe pipe = crtc->pipe;
9218 u32 cntl;
9219
9220 cntl = MCURSOR_GAMMA_ENABLE;
9221
9222 if (HAS_DDI(dev_priv))
9223 cntl |= CURSOR_PIPE_CSC_ENABLE;
9224
9225 cntl |= pipe << 28; /* Connect to correct pipe */
9226
9227 switch (plane_state->base.crtc_w) {
9228 case 64:
9229 cntl |= CURSOR_MODE_64_ARGB_AX;
9230 break;
9231 case 128:
9232 cntl |= CURSOR_MODE_128_ARGB_AX;
9233 break;
9234 case 256:
9235 cntl |= CURSOR_MODE_256_ARGB_AX;
9236 break;
9237 default:
9238 MISSING_CASE(plane_state->base.crtc_w);
9239 return 0;
9240 }
9241
9242 if (plane_state->base.rotation & DRM_ROTATE_180)
9243 cntl |= CURSOR_ROTATE_180;
9244
9245 return cntl;
9246}
9247
9225static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base, 9248static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base,
9226 const struct intel_plane_state *plane_state) 9249 const struct intel_plane_state *plane_state)
9227{ 9250{
@@ -9231,30 +9254,8 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base,
9231 int pipe = intel_crtc->pipe; 9254 int pipe = intel_crtc->pipe;
9232 uint32_t cntl = 0; 9255 uint32_t cntl = 0;
9233 9256
9234 if (plane_state && plane_state->base.visible) { 9257 if (plane_state && plane_state->base.visible)
9235 cntl = MCURSOR_GAMMA_ENABLE; 9258 cntl = plane_state->ctl;
9236 switch (plane_state->base.crtc_w) {
9237 case 64:
9238 cntl |= CURSOR_MODE_64_ARGB_AX;
9239 break;
9240 case 128:
9241 cntl |= CURSOR_MODE_128_ARGB_AX;
9242 break;
9243 case 256:
9244 cntl |= CURSOR_MODE_256_ARGB_AX;
9245 break;
9246 default:
9247 MISSING_CASE(plane_state->base.crtc_w);
9248 return;
9249 }
9250 cntl |= pipe << 28; /* Connect to correct pipe */
9251
9252 if (HAS_DDI(dev_priv))
9253 cntl |= CURSOR_PIPE_CSC_ENABLE;
9254
9255 if (plane_state->base.rotation & DRM_ROTATE_180)
9256 cntl |= CURSOR_ROTATE_180;
9257 }
9258 9259
9259 if (intel_crtc->cursor_cntl != cntl) { 9260 if (intel_crtc->cursor_cntl != cntl) {
9260 I915_WRITE_FW(CURCNTR(pipe), cntl); 9261 I915_WRITE_FW(CURCNTR(pipe), cntl);
@@ -9491,10 +9492,10 @@ static int intel_modeset_setup_plane_state(struct drm_atomic_state *state,
9491 return 0; 9492 return 0;
9492} 9493}
9493 9494
9494bool intel_get_load_detect_pipe(struct drm_connector *connector, 9495int intel_get_load_detect_pipe(struct drm_connector *connector,
9495 struct drm_display_mode *mode, 9496 struct drm_display_mode *mode,
9496 struct intel_load_detect_pipe *old, 9497 struct intel_load_detect_pipe *old,
9497 struct drm_modeset_acquire_ctx *ctx) 9498 struct drm_modeset_acquire_ctx *ctx)
9498{ 9499{
9499 struct intel_crtc *intel_crtc; 9500 struct intel_crtc *intel_crtc;
9500 struct intel_encoder *intel_encoder = 9501 struct intel_encoder *intel_encoder =
@@ -9517,10 +9518,7 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector,
9517 9518
9518 old->restore_state = NULL; 9519 old->restore_state = NULL;
9519 9520
9520retry: 9521 WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));
9521 ret = drm_modeset_lock(&config->connection_mutex, ctx);
9522 if (ret)
9523 goto fail;
9524 9522
9525 /* 9523 /*
9526 * Algorithm gets a little messy: 9524 * Algorithm gets a little messy:
@@ -9670,10 +9668,8 @@ fail:
9670 restore_state = NULL; 9668 restore_state = NULL;
9671 } 9669 }
9672 9670
9673 if (ret == -EDEADLK) { 9671 if (ret == -EDEADLK)
9674 drm_modeset_backoff(ctx); 9672 return ret;
9675 goto retry;
9676 }
9677 9673
9678 return false; 9674 return false;
9679} 9675}
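
With the retry: label gone, intel_get_load_detect_pipe() now asserts that connection_mutex is already held and simply propagates -EDEADLK; the backoff-and-retry dance becomes the caller's job, using the standard drm_modeset_acquire machinery. A hedged sketch of what such a caller looks like; load_detect() here is a placeholder for the real call chain, not a driver function:

/* Hedged caller-side sketch; load_detect() is a stand-in name. */
static void probe_with_retry(struct drm_device *dev,
                             struct drm_connector *connector)
{
        struct drm_modeset_acquire_ctx ctx;
        int ret;

        drm_modeset_acquire_init(&ctx, 0);
retry:
        ret = drm_modeset_lock(&dev->mode_config.connection_mutex, &ctx);
        if (!ret)
                ret = load_detect(connector, &ctx);
        if (ret == -EDEADLK) {
                drm_modeset_backoff(&ctx);
                goto retry;
        }
        drm_modeset_drop_locks(&ctx);
        drm_modeset_acquire_fini(&ctx);
}
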
@@ -10354,7 +10350,7 @@ static void skl_do_mmio_flip(struct intel_crtc *intel_crtc,
10354 ctl = I915_READ(PLANE_CTL(pipe, 0)); 10350 ctl = I915_READ(PLANE_CTL(pipe, 0));
10355 ctl &= ~PLANE_CTL_TILED_MASK; 10351 ctl &= ~PLANE_CTL_TILED_MASK;
10356 switch (fb->modifier) { 10352 switch (fb->modifier) {
10357 case DRM_FORMAT_MOD_NONE: 10353 case DRM_FORMAT_MOD_LINEAR:
10358 break; 10354 break;
10359 case I915_FORMAT_MOD_X_TILED: 10355 case I915_FORMAT_MOD_X_TILED:
10360 ctl |= PLANE_CTL_TILED_X; 10356 ctl |= PLANE_CTL_TILED_X;
@@ -10715,7 +10711,7 @@ out_hang:
10715 state = drm_atomic_state_alloc(dev); 10711 state = drm_atomic_state_alloc(dev);
10716 if (!state) 10712 if (!state)
10717 return -ENOMEM; 10713 return -ENOMEM;
10718 state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc); 10714 state->acquire_ctx = dev->mode_config.acquire_ctx;
10719 10715
10720retry: 10716retry:
10721 plane_state = drm_atomic_get_plane_state(state, primary); 10717 plane_state = drm_atomic_get_plane_state(state, primary);
@@ -11005,7 +11001,6 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
11005} 11001}
11006 11002
11007static const struct drm_crtc_helper_funcs intel_helper_funcs = { 11003static const struct drm_crtc_helper_funcs intel_helper_funcs = {
11008 .mode_set_base_atomic = intel_pipe_set_base_atomic,
11009 .atomic_begin = intel_begin_crtc_commit, 11004 .atomic_begin = intel_begin_crtc_commit,
11010 .atomic_flush = intel_finish_crtc_commit, 11005 .atomic_flush = intel_finish_crtc_commit,
11011 .atomic_check = intel_crtc_atomic_check, 11006 .atomic_check = intel_crtc_atomic_check,
@@ -11709,6 +11704,9 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
11709 if ((INTEL_GEN(dev_priv) < 8 && !IS_HASWELL(dev_priv)) || 11704 if ((INTEL_GEN(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
11710 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 11705 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
11711 PIPE_CONF_CHECK_I(limited_color_range); 11706 PIPE_CONF_CHECK_I(limited_color_range);
11707
11708 PIPE_CONF_CHECK_I(hdmi_scrambling);
11709 PIPE_CONF_CHECK_I(hdmi_high_tmds_clock_ratio);
11712 PIPE_CONF_CHECK_I(has_infoframe); 11710 PIPE_CONF_CHECK_I(has_infoframe);
11713 11711
11714 PIPE_CONF_CHECK_I(has_audio); 11712 PIPE_CONF_CHECK_I(has_audio);
@@ -13009,17 +13007,6 @@ static int intel_atomic_commit(struct drm_device *dev,
13009 struct drm_i915_private *dev_priv = to_i915(dev); 13007 struct drm_i915_private *dev_priv = to_i915(dev);
13010 int ret = 0; 13008 int ret = 0;
13011 13009
13012 /*
13013 * The intel_legacy_cursor_update() fast path takes care
13014 * of avoiding the vblank waits for simple cursor
13015 * movement and flips. For cursor on/off and size changes,
13016 * we want to perform the vblank waits so that watermark
13017 * updates happen during the correct frames. Gen9+ have
13018 * double buffered watermarks and so shouldn't need this.
13019 */
13020 if (INTEL_GEN(dev_priv) < 9)
13021 state->legacy_cursor_update = false;
13022
13023 ret = drm_atomic_helper_setup_commit(state, nonblock); 13010 ret = drm_atomic_helper_setup_commit(state, nonblock);
13024 if (ret) 13011 if (ret)
13025 return ret; 13012 return ret;
@@ -13035,6 +13022,26 @@ static int intel_atomic_commit(struct drm_device *dev,
13035 return ret; 13022 return ret;
13036 } 13023 }
13037 13024
13025 /*
13026 * The intel_legacy_cursor_update() fast path takes care
13027 * of avoiding the vblank waits for simple cursor
13028 * movement and flips. For cursor on/off and size changes,
13029 * we want to perform the vblank waits so that watermark
13030 * updates happen during the correct frames. Gen9+ have
13031 * double buffered watermarks and so shouldn't need this.
13032 *
13033 * Do this after drm_atomic_helper_setup_commit() and
13034 * intel_atomic_prepare_commit() because we still want
13035 * to skip the flip and fb cleanup waits. Although that
13036 * does risk yanking the mapping from under the display
13037 * engine.
13038 *
13039 * FIXME doing watermarks and fb cleanup from a vblank worker
13040 * (assuming we had any) would solve these problems.
13041 */
13042 if (INTEL_GEN(dev_priv) < 9)
13043 state->legacy_cursor_update = false;
13044
13038 drm_atomic_helper_swap_state(state, true); 13045 drm_atomic_helper_swap_state(state, true);
13039 dev_priv->wm.distrust_bios_wm = false; 13046 dev_priv->wm.distrust_bios_wm = false;
13040 intel_shared_dpll_swap_state(state); 13047 intel_shared_dpll_swap_state(state);
@@ -13075,7 +13082,7 @@ void intel_crtc_restore_mode(struct drm_crtc *crtc)
13075 return; 13082 return;
13076 } 13083 }
13077 13084
13078 state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc); 13085 state->acquire_ctx = crtc->dev->mode_config.acquire_ctx;
13079 13086
13080retry: 13087retry:
13081 crtc_state = drm_atomic_get_crtc_state(state, crtc); 13088 crtc_state = drm_atomic_get_crtc_state(state, crtc);
@@ -13098,50 +13105,8 @@ out:
13098 drm_atomic_state_put(state); 13105 drm_atomic_state_put(state);
13099} 13106}
13100 13107
13101/*
13102 * FIXME: Remove this once i915 is fully DRIVER_ATOMIC by calling
13103 * drm_atomic_helper_legacy_gamma_set() directly.
13104 */
13105static int intel_atomic_legacy_gamma_set(struct drm_crtc *crtc,
13106 u16 *red, u16 *green, u16 *blue,
13107 uint32_t size)
13108{
13109 struct drm_device *dev = crtc->dev;
13110 struct drm_mode_config *config = &dev->mode_config;
13111 struct drm_crtc_state *state;
13112 int ret;
13113
13114 ret = drm_atomic_helper_legacy_gamma_set(crtc, red, green, blue, size);
13115 if (ret)
13116 return ret;
13117
13118 /*
13119 * Make sure we update the legacy properties so this works when
13120 * atomic is not enabled.
13121 */
13122
13123 state = crtc->state;
13124
13125 drm_object_property_set_value(&crtc->base,
13126 config->degamma_lut_property,
13127 (state->degamma_lut) ?
13128 state->degamma_lut->base.id : 0);
13129
13130 drm_object_property_set_value(&crtc->base,
13131 config->ctm_property,
13132 (state->ctm) ?
13133 state->ctm->base.id : 0);
13134
13135 drm_object_property_set_value(&crtc->base,
13136 config->gamma_lut_property,
13137 (state->gamma_lut) ?
13138 state->gamma_lut->base.id : 0);
13139
13140 return 0;
13141}
13142
13143static const struct drm_crtc_funcs intel_crtc_funcs = { 13108static const struct drm_crtc_funcs intel_crtc_funcs = {
13144 .gamma_set = intel_atomic_legacy_gamma_set, 13109 .gamma_set = drm_atomic_helper_legacy_gamma_set,
13145 .set_config = drm_atomic_helper_set_config, 13110 .set_config = drm_atomic_helper_set_config,
13146 .set_property = drm_atomic_helper_crtc_set_property, 13111 .set_property = drm_atomic_helper_crtc_set_property,
13147 .destroy = intel_crtc_destroy, 13112 .destroy = intel_crtc_destroy,
@@ -13344,6 +13309,14 @@ intel_check_primary_plane(struct drm_plane *plane,
13344 ret = skl_check_plane_surface(state); 13309 ret = skl_check_plane_surface(state);
13345 if (ret) 13310 if (ret)
13346 return ret; 13311 return ret;
13312
13313 state->ctl = skl_plane_ctl(crtc_state, state);
13314 } else {
13315 ret = i9xx_check_plane_surface(state);
13316 if (ret)
13317 return ret;
13318
13319 state->ctl = i9xx_plane_ctl(crtc_state, state);
13347 } 13320 }
13348 13321
13349 return 0; 13322 return 0;
@@ -13603,12 +13576,6 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
13603 13576
13604 primary->update_plane = skylake_update_primary_plane; 13577 primary->update_plane = skylake_update_primary_plane;
13605 primary->disable_plane = skylake_disable_primary_plane; 13578 primary->disable_plane = skylake_disable_primary_plane;
13606 } else if (HAS_PCH_SPLIT(dev_priv)) {
13607 intel_primary_formats = i965_primary_formats;
13608 num_formats = ARRAY_SIZE(i965_primary_formats);
13609
13610 primary->update_plane = ironlake_update_primary_plane;
13611 primary->disable_plane = i9xx_disable_primary_plane;
13612 } else if (INTEL_GEN(dev_priv) >= 4) { 13579 } else if (INTEL_GEN(dev_priv) >= 4) {
13613 intel_primary_formats = i965_primary_formats; 13580 intel_primary_formats = i965_primary_formats;
13614 num_formats = ARRAY_SIZE(i965_primary_formats); 13581 num_formats = ARRAY_SIZE(i965_primary_formats);
@@ -13680,6 +13647,7 @@ intel_check_cursor_plane(struct drm_plane *plane,
13680 struct intel_crtc_state *crtc_state, 13647 struct intel_crtc_state *crtc_state,
13681 struct intel_plane_state *state) 13648 struct intel_plane_state *state)
13682{ 13649{
13650 struct drm_i915_private *dev_priv = to_i915(plane->dev);
13683 struct drm_framebuffer *fb = state->base.fb; 13651 struct drm_framebuffer *fb = state->base.fb;
13684 struct drm_i915_gem_object *obj = intel_fb_obj(fb); 13652 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
13685 enum pipe pipe = to_intel_plane(plane)->pipe; 13653 enum pipe pipe = to_intel_plane(plane)->pipe;
@@ -13699,7 +13667,7 @@ intel_check_cursor_plane(struct drm_plane *plane,
13699 return 0; 13667 return 0;
13700 13668
13701 /* Check for which cursor types we support */ 13669 /* Check for which cursor types we support */
13702 if (!cursor_size_ok(to_i915(plane->dev), state->base.crtc_w, 13670 if (!cursor_size_ok(dev_priv, state->base.crtc_w,
13703 state->base.crtc_h)) { 13671 state->base.crtc_h)) {
13704 DRM_DEBUG("Cursor dimension %dx%d not supported\n", 13672 DRM_DEBUG("Cursor dimension %dx%d not supported\n",
13705 state->base.crtc_w, state->base.crtc_h); 13673 state->base.crtc_w, state->base.crtc_h);
@@ -13712,7 +13680,7 @@ intel_check_cursor_plane(struct drm_plane *plane,
13712 return -ENOMEM; 13680 return -ENOMEM;
13713 } 13681 }
13714 13682
13715 if (fb->modifier != DRM_FORMAT_MOD_NONE) { 13683 if (fb->modifier != DRM_FORMAT_MOD_LINEAR) {
13716 DRM_DEBUG_KMS("cursor cannot be tiled\n"); 13684 DRM_DEBUG_KMS("cursor cannot be tiled\n");
13717 return -EINVAL; 13685 return -EINVAL;
13718 } 13686 }
@@ -13727,12 +13695,17 @@ intel_check_cursor_plane(struct drm_plane *plane,
13727 * display power well must be turned off and on again. 13695 * display power well must be turned off and on again.
13728 * Refuse to put the cursor into that compromised position. 13696 * Refuse to put the cursor into that compromised position.
13729 */ 13697 */
13730 if (IS_CHERRYVIEW(to_i915(plane->dev)) && pipe == PIPE_C && 13698 if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_C &&
13731 state->base.visible && state->base.crtc_x < 0) { 13699 state->base.visible && state->base.crtc_x < 0) {
13732 DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n"); 13700 DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n");
13733 return -EINVAL; 13701 return -EINVAL;
13734 } 13702 }
13735 13703
13704 if (IS_I845G(dev_priv) || IS_I865G(dev_priv))
13705 state->ctl = i845_cursor_ctl(crtc_state, state);
13706 else
13707 state->ctl = i9xx_cursor_ctl(crtc_state, state);
13708
13736 return 0; 13709 return 0;
13737} 13710}
13738 13711
@@ -14368,7 +14341,7 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
14368 mode_cmd->modifier[0]); 14341 mode_cmd->modifier[0]);
14369 goto err; 14342 goto err;
14370 } 14343 }
14371 case DRM_FORMAT_MOD_NONE: 14344 case DRM_FORMAT_MOD_LINEAR:
14372 case I915_FORMAT_MOD_X_TILED: 14345 case I915_FORMAT_MOD_X_TILED:
14373 break; 14346 break;
14374 default: 14347 default:
@@ -14391,7 +14364,7 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
14391 mode_cmd->pixel_format); 14364 mode_cmd->pixel_format);
14392 if (mode_cmd->pitches[0] > pitch_limit) { 14365 if (mode_cmd->pitches[0] > pitch_limit) {
14393 DRM_DEBUG_KMS("%s pitch (%u) must be at most %d\n", 14366 DRM_DEBUG_KMS("%s pitch (%u) must be at most %d\n",
14394 mode_cmd->modifier[0] != DRM_FORMAT_MOD_NONE ? 14367 mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ?
14395 "tiled" : "linear", 14368 "tiled" : "linear",
14396 mode_cmd->pitches[0], pitch_limit); 14369 mode_cmd->pitches[0], pitch_limit);
14397 goto err; 14370 goto err;
@@ -15084,6 +15057,7 @@ static void intel_enable_pipe_a(struct drm_device *dev)
15084 struct drm_connector *crt = NULL; 15057 struct drm_connector *crt = NULL;
15085 struct intel_load_detect_pipe load_detect_temp; 15058 struct intel_load_detect_pipe load_detect_temp;
15086 struct drm_modeset_acquire_ctx *ctx = dev->mode_config.acquire_ctx; 15059 struct drm_modeset_acquire_ctx *ctx = dev->mode_config.acquire_ctx;
15060 int ret;
15087 15061
15088 /* We can't just switch on the pipe A, we need to set things up with a 15062 /* We can't just switch on the pipe A, we need to set things up with a
15089 * proper mode and output configuration. As a gross hack, enable pipe A 15063 * proper mode and output configuration. As a gross hack, enable pipe A
@@ -15100,7 +15074,10 @@ static void intel_enable_pipe_a(struct drm_device *dev)
15100 if (!crt) 15074 if (!crt)
15101 return; 15075 return;
15102 15076
15103 if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp, ctx)) 15077 ret = intel_get_load_detect_pipe(crt, NULL, &load_detect_temp, ctx);
15078 WARN(ret < 0, "All modeset mutexes are locked, but intel_get_load_detect_pipe failed\n");
15079
15080 if (ret > 0)
15104 intel_release_load_detect_pipe(crt, &load_detect_temp, ctx); 15081 intel_release_load_detect_pipe(crt, &load_detect_temp, ctx);
15105} 15082}
15106 15083
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index fd96a6cf7326..ee77b519835c 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -4566,7 +4566,7 @@ intel_dp_unset_edid(struct intel_dp *intel_dp)
4566 intel_dp->has_audio = false; 4566 intel_dp->has_audio = false;
4567} 4567}
4568 4568
4569static enum drm_connector_status 4569static int
4570intel_dp_long_pulse(struct intel_connector *intel_connector) 4570intel_dp_long_pulse(struct intel_connector *intel_connector)
4571{ 4571{
4572 struct drm_connector *connector = &intel_connector->base; 4572 struct drm_connector *connector = &intel_connector->base;
@@ -4577,6 +4577,8 @@ intel_dp_long_pulse(struct intel_connector *intel_connector)
4577 enum drm_connector_status status; 4577 enum drm_connector_status status;
4578 u8 sink_irq_vector = 0; 4578 u8 sink_irq_vector = 0;
4579 4579
4580 WARN_ON(!drm_modeset_is_locked(&connector->dev->mode_config.connection_mutex));
4581
4580 intel_display_power_get(to_i915(dev), intel_dp->aux_power_domain); 4582 intel_display_power_get(to_i915(dev), intel_dp->aux_power_domain);
4581 4583
4582 /* Can't disconnect eDP, but you can close the lid... */ 4584 /* Can't disconnect eDP, but you can close the lid... */
@@ -4634,16 +4636,20 @@ intel_dp_long_pulse(struct intel_connector *intel_connector)
4634 */ 4636 */
4635 status = connector_status_disconnected; 4637 status = connector_status_disconnected;
4636 goto out; 4638 goto out;
4637 } else if (connector->status == connector_status_connected) { 4639 } else {
4638 /* 4640 /*
4639 * If display was connected already and is still connected 4641 * If the display is now connected, check link status;
4640 * check links status, there has been known issues of 4642 * there have been known issues of link loss triggering
4641 * link loss triggerring long pulse!!!! 4643 * a long pulse.
4644 *
4645 * Some sinks (e.g. the ASUS PB287Q) seem to perform
4646 * weird HPD ping-pong during modesets, so we can apparently
4647 * end up with HPD going low during a modeset, and then
4648 * going back up soon after. And once that happens we must
4649 * retrain the link to get a picture. That covers the case
4650 * where no userspace component reacted to the intermittent HPD dip.
4642 */ 4651 */
4643 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
4644 intel_dp_check_link_status(intel_dp); 4652 intel_dp_check_link_status(intel_dp);
4645 drm_modeset_unlock(&dev->mode_config.connection_mutex);
4646 goto out;
4647 } 4653 }
4648 4654
4649 /* 4655 /*
@@ -4682,11 +4688,13 @@ out:
4682 return status; 4688 return status;
4683} 4689}
4684 4690
4685static enum drm_connector_status 4691static int
4686intel_dp_detect(struct drm_connector *connector, bool force) 4692intel_dp_detect(struct drm_connector *connector,
4693 struct drm_modeset_acquire_ctx *ctx,
4694 bool force)
4687{ 4695{
4688 struct intel_dp *intel_dp = intel_attached_dp(connector); 4696 struct intel_dp *intel_dp = intel_attached_dp(connector);
4689 enum drm_connector_status status = connector->status; 4697 int status = connector->status;
4690 4698
4691 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", 4699 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4692 connector->base.id, connector->name); 4700 connector->base.id, connector->name);
@@ -5014,7 +5022,6 @@ void intel_dp_encoder_reset(struct drm_encoder *encoder)
5014 5022
5015static const struct drm_connector_funcs intel_dp_connector_funcs = { 5023static const struct drm_connector_funcs intel_dp_connector_funcs = {
5016 .dpms = drm_atomic_helper_connector_dpms, 5024 .dpms = drm_atomic_helper_connector_dpms,
5017 .detect = intel_dp_detect,
5018 .force = intel_dp_force, 5025 .force = intel_dp_force,
5019 .fill_modes = drm_helper_probe_single_connector_modes, 5026 .fill_modes = drm_helper_probe_single_connector_modes,
5020 .set_property = intel_dp_set_property, 5027 .set_property = intel_dp_set_property,
@@ -5027,6 +5034,7 @@ static const struct drm_connector_funcs intel_dp_connector_funcs = {
5027}; 5034};
5028 5035
5029static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = { 5036static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
5037 .detect_ctx = intel_dp_detect,
5030 .get_modes = intel_dp_get_modes, 5038 .get_modes = intel_dp_get_modes,
5031 .mode_valid = intel_dp_mode_valid, 5039 .mode_valid = intel_dp_mode_valid,
5032}; 5040};
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 51228fe4283b..aaee3949a422 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -398,6 +398,9 @@ struct intel_plane_state {
398 int x, y; 398 int x, y;
399 } aux; 399 } aux;
400 400
401 /* plane control register */
402 u32 ctl;
403
401 /* 404 /*
402 * scaler_id 405 * scaler_id
403 * = -1 : not using a scaler 406 * = -1 : not using a scaler
@@ -729,6 +732,12 @@ struct intel_crtc_state {
729 732
730 /* bitmask of visible planes (enum plane_id) */ 733 /* bitmask of visible planes (enum plane_id) */
731 u8 active_planes; 734 u8 active_planes;
735
736 /* HDMI scrambling status */
737 bool hdmi_scrambling;
738
739 /* HDMI High TMDS char rate ratio */
740 bool hdmi_high_tmds_clock_ratio;
732}; 741};
733 742
734struct intel_crtc { 743struct intel_crtc {
@@ -1220,12 +1229,9 @@ void intel_crt_init(struct drm_i915_private *dev_priv);
1220void intel_crt_reset(struct drm_encoder *encoder); 1229void intel_crt_reset(struct drm_encoder *encoder);
1221 1230
1222/* intel_ddi.c */ 1231/* intel_ddi.c */
1223void intel_ddi_clk_select(struct intel_encoder *encoder,
1224 struct intel_shared_dpll *pll);
1225void intel_ddi_fdi_post_disable(struct intel_encoder *intel_encoder, 1232void intel_ddi_fdi_post_disable(struct intel_encoder *intel_encoder,
1226 struct intel_crtc_state *old_crtc_state, 1233 struct intel_crtc_state *old_crtc_state,
1227 struct drm_connector_state *old_conn_state); 1234 struct drm_connector_state *old_conn_state);
1228void intel_prepare_dp_ddi_buffers(struct intel_encoder *encoder);
1229void hsw_fdi_link_train(struct intel_crtc *crtc, 1235void hsw_fdi_link_train(struct intel_crtc *crtc,
1230 const struct intel_crtc_state *crtc_state); 1236 const struct intel_crtc_state *crtc_state);
1231void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port); 1237void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port);
@@ -1236,8 +1242,8 @@ void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
1236 enum transcoder cpu_transcoder); 1242 enum transcoder cpu_transcoder);
1237void intel_ddi_enable_pipe_clock(const struct intel_crtc_state *crtc_state); 1243void intel_ddi_enable_pipe_clock(const struct intel_crtc_state *crtc_state);
1238void intel_ddi_disable_pipe_clock(const struct intel_crtc_state *crtc_state); 1244void intel_ddi_disable_pipe_clock(const struct intel_crtc_state *crtc_state);
1239bool intel_ddi_pll_select(struct intel_crtc *crtc, 1245struct intel_encoder *
1240 struct intel_crtc_state *crtc_state); 1246intel_ddi_get_crtc_new_encoder(struct intel_crtc_state *crtc_state);
1241void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state); 1247void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state);
1242void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp); 1248void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp);
1243bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector); 1249bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
@@ -1246,7 +1252,6 @@ bool intel_ddi_is_audio_enabled(struct drm_i915_private *dev_priv,
1246void intel_ddi_get_config(struct intel_encoder *encoder, 1252void intel_ddi_get_config(struct intel_encoder *encoder,
1247 struct intel_crtc_state *pipe_config); 1253 struct intel_crtc_state *pipe_config);
1248 1254
1249void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder);
1250void intel_ddi_clock_get(struct intel_encoder *encoder, 1255void intel_ddi_clock_get(struct intel_encoder *encoder,
1251 struct intel_crtc_state *pipe_config); 1256 struct intel_crtc_state *pipe_config);
1252void intel_ddi_set_vc_payload_alloc(const struct intel_crtc_state *crtc_state, 1257void intel_ddi_set_vc_payload_alloc(const struct intel_crtc_state *crtc_state,
@@ -1353,10 +1358,10 @@ int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp);
1353void vlv_wait_port_ready(struct drm_i915_private *dev_priv, 1358void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
1354 struct intel_digital_port *dport, 1359 struct intel_digital_port *dport,
1355 unsigned int expected_mask); 1360 unsigned int expected_mask);
1356bool intel_get_load_detect_pipe(struct drm_connector *connector, 1361int intel_get_load_detect_pipe(struct drm_connector *connector,
1357 struct drm_display_mode *mode, 1362 struct drm_display_mode *mode,
1358 struct intel_load_detect_pipe *old, 1363 struct intel_load_detect_pipe *old,
1359 struct drm_modeset_acquire_ctx *ctx); 1364 struct drm_modeset_acquire_ctx *ctx);
1360void intel_release_load_detect_pipe(struct drm_connector *connector, 1365void intel_release_load_detect_pipe(struct drm_connector *connector,
1361 struct intel_load_detect_pipe *old, 1366 struct intel_load_detect_pipe *old,
1362 struct drm_modeset_acquire_ctx *ctx); 1367 struct drm_modeset_acquire_ctx *ctx);
@@ -1445,12 +1450,12 @@ static inline u32 intel_plane_ggtt_offset(const struct intel_plane_state *state)
1445 return i915_ggtt_offset(state->vma); 1450 return i915_ggtt_offset(state->vma);
1446} 1451}
1447 1452
1448u32 skl_plane_ctl_format(uint32_t pixel_format); 1453u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
1449u32 skl_plane_ctl_tiling(uint64_t fb_modifier); 1454 const struct intel_plane_state *plane_state);
1450u32 skl_plane_ctl_rotation(unsigned int rotation);
1451u32 skl_plane_stride(const struct drm_framebuffer *fb, int plane, 1455u32 skl_plane_stride(const struct drm_framebuffer *fb, int plane,
1452 unsigned int rotation); 1456 unsigned int rotation);
1453int skl_check_plane_surface(struct intel_plane_state *plane_state); 1457int skl_check_plane_surface(struct intel_plane_state *plane_state);
1458int i9xx_check_plane_surface(struct intel_plane_state *plane_state);
1454 1459
1455/* intel_csr.c */ 1460/* intel_csr.c */
1456void intel_csr_ucode_init(struct drm_i915_private *); 1461void intel_csr_ucode_init(struct drm_i915_private *);
@@ -1620,6 +1625,10 @@ struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
1620bool intel_hdmi_compute_config(struct intel_encoder *encoder, 1625bool intel_hdmi_compute_config(struct intel_encoder *encoder,
1621 struct intel_crtc_state *pipe_config, 1626 struct intel_crtc_state *pipe_config,
1622 struct drm_connector_state *conn_state); 1627 struct drm_connector_state *conn_state);
1628void intel_hdmi_handle_sink_scrambling(struct intel_encoder *intel_encoder,
1629 struct drm_connector *connector,
1630 bool high_tmds_clock_ratio,
1631 bool scrambling);
1623void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable); 1632void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable);
1624 1633
1625 1634
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 4200faa520c7..854e8e0c836b 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -36,45 +36,45 @@ static const struct engine_info {
36 int (*init_execlists)(struct intel_engine_cs *engine); 36 int (*init_execlists)(struct intel_engine_cs *engine);
37} intel_engines[] = { 37} intel_engines[] = {
38 [RCS] = { 38 [RCS] = {
39 .name = "render ring", 39 .name = "rcs",
40 .exec_id = I915_EXEC_RENDER,
41 .hw_id = RCS_HW, 40 .hw_id = RCS_HW,
41 .exec_id = I915_EXEC_RENDER,
42 .mmio_base = RENDER_RING_BASE, 42 .mmio_base = RENDER_RING_BASE,
43 .irq_shift = GEN8_RCS_IRQ_SHIFT, 43 .irq_shift = GEN8_RCS_IRQ_SHIFT,
44 .init_execlists = logical_render_ring_init, 44 .init_execlists = logical_render_ring_init,
45 .init_legacy = intel_init_render_ring_buffer, 45 .init_legacy = intel_init_render_ring_buffer,
46 }, 46 },
47 [BCS] = { 47 [BCS] = {
48 .name = "blitter ring", 48 .name = "bcs",
49 .exec_id = I915_EXEC_BLT,
50 .hw_id = BCS_HW, 49 .hw_id = BCS_HW,
50 .exec_id = I915_EXEC_BLT,
51 .mmio_base = BLT_RING_BASE, 51 .mmio_base = BLT_RING_BASE,
52 .irq_shift = GEN8_BCS_IRQ_SHIFT, 52 .irq_shift = GEN8_BCS_IRQ_SHIFT,
53 .init_execlists = logical_xcs_ring_init, 53 .init_execlists = logical_xcs_ring_init,
54 .init_legacy = intel_init_blt_ring_buffer, 54 .init_legacy = intel_init_blt_ring_buffer,
55 }, 55 },
56 [VCS] = { 56 [VCS] = {
57 .name = "bsd ring", 57 .name = "vcs",
58 .exec_id = I915_EXEC_BSD,
59 .hw_id = VCS_HW, 58 .hw_id = VCS_HW,
59 .exec_id = I915_EXEC_BSD,
60 .mmio_base = GEN6_BSD_RING_BASE, 60 .mmio_base = GEN6_BSD_RING_BASE,
61 .irq_shift = GEN8_VCS1_IRQ_SHIFT, 61 .irq_shift = GEN8_VCS1_IRQ_SHIFT,
62 .init_execlists = logical_xcs_ring_init, 62 .init_execlists = logical_xcs_ring_init,
63 .init_legacy = intel_init_bsd_ring_buffer, 63 .init_legacy = intel_init_bsd_ring_buffer,
64 }, 64 },
65 [VCS2] = { 65 [VCS2] = {
66 .name = "bsd2 ring", 66 .name = "vcs2",
67 .exec_id = I915_EXEC_BSD,
68 .hw_id = VCS2_HW, 67 .hw_id = VCS2_HW,
68 .exec_id = I915_EXEC_BSD,
69 .mmio_base = GEN8_BSD2_RING_BASE, 69 .mmio_base = GEN8_BSD2_RING_BASE,
70 .irq_shift = GEN8_VCS2_IRQ_SHIFT, 70 .irq_shift = GEN8_VCS2_IRQ_SHIFT,
71 .init_execlists = logical_xcs_ring_init, 71 .init_execlists = logical_xcs_ring_init,
72 .init_legacy = intel_init_bsd2_ring_buffer, 72 .init_legacy = intel_init_bsd2_ring_buffer,
73 }, 73 },
74 [VECS] = { 74 [VECS] = {
75 .name = "video enhancement ring", 75 .name = "vecs",
76 .exec_id = I915_EXEC_VEBOX,
77 .hw_id = VECS_HW, 76 .hw_id = VECS_HW,
77 .exec_id = I915_EXEC_VEBOX,
78 .mmio_base = VEBOX_RING_BASE, 78 .mmio_base = VEBOX_RING_BASE,
79 .irq_shift = GEN8_VECS_IRQ_SHIFT, 79 .irq_shift = GEN8_VECS_IRQ_SHIFT,
80 .init_execlists = logical_xcs_ring_init, 80 .init_execlists = logical_xcs_ring_init,
@@ -242,12 +242,12 @@ void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno)
242 void *semaphores; 242 void *semaphores;
243 243
244 /* Semaphores are in noncoherent memory, flush to be safe */ 244 /* Semaphores are in noncoherent memory, flush to be safe */
245 semaphores = kmap(page); 245 semaphores = kmap_atomic(page);
246 memset(semaphores + GEN8_SEMAPHORE_OFFSET(engine->id, 0), 246 memset(semaphores + GEN8_SEMAPHORE_OFFSET(engine->id, 0),
247 0, I915_NUM_ENGINES * gen8_semaphore_seqno_size); 247 0, I915_NUM_ENGINES * gen8_semaphore_seqno_size);
248 drm_clflush_virt_range(semaphores + GEN8_SEMAPHORE_OFFSET(engine->id, 0), 248 drm_clflush_virt_range(semaphores + GEN8_SEMAPHORE_OFFSET(engine->id, 0),
249 I915_NUM_ENGINES * gen8_semaphore_seqno_size); 249 I915_NUM_ENGINES * gen8_semaphore_seqno_size);
250 kunmap(page); 250 kunmap_atomic(semaphores);
251 } 251 }
252 252
253 intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno); 253 intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
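
kmap() may sleep and maintains a persistent mapping; for a short memset-plus-clflush over a single page with no sleeping calls in between, kmap_atomic() is the cheaper mapping, which is what the hunk above switches to. The general pattern, assuming nothing between the map and unmap can sleep:

/* General kmap_atomic() pattern; keep the mapped section short and
 * free of sleeping calls. */
static void clear_and_flush(struct page *page, unsigned int offset,
                            unsigned int len)
{
        void *vaddr = kmap_atomic(page);

        memset(vaddr + offset, 0, len);
        drm_clflush_virt_range(vaddr + offset, len);
        kunmap_atomic(vaddr);
}
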
@@ -1111,6 +1111,15 @@ bool intel_engines_are_idle(struct drm_i915_private *dev_priv)
1111 struct intel_engine_cs *engine; 1111 struct intel_engine_cs *engine;
1112 enum intel_engine_id id; 1112 enum intel_engine_id id;
1113 1113
1114 if (READ_ONCE(dev_priv->gt.active_requests))
1115 return false;
1116
1117 /* If the driver is wedged, HW state may be very inconsistent and
1118 * report that it is still busy, even though we have stopped using it.
1119 */
1120 if (i915_terminally_wedged(&dev_priv->gpu_error))
1121 return true;
1122
1114 for_each_engine(engine, dev_priv, id) { 1123 for_each_engine(engine, dev_priv, id) {
1115 if (!intel_engine_is_idle(engine)) 1124 if (!intel_engine_is_idle(engine))
1116 return false; 1125 return false;
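
The added checks give intel_engines_are_idle() two early exits: a cheap lockless READ_ONCE() of the active request count, and a terminally-wedged check that overrides whatever the confused hardware reports. The ordering matters, sketched here with invented names:

/* Sketch of the early-out ordering, names invented for illustration. */
static bool example_device_idle(const struct example_dev *d)
{
        if (READ_ONCE(d->active_requests)) /* cheap, no locks, no MMIO */
                return false;
        if (d->wedged)                     /* HW state is untrustworthy */
                return true;
        return example_all_engines_idle(d);
}
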
diff --git a/drivers/gpu/drm/i915/intel_guc_fwif.h b/drivers/gpu/drm/i915/intel_guc_fwif.h
index 25691f0e4c50..cb36cbf3818f 100644
--- a/drivers/gpu/drm/i915/intel_guc_fwif.h
+++ b/drivers/gpu/drm/i915/intel_guc_fwif.h
@@ -26,14 +26,14 @@
26#define GFXCORE_FAMILY_GEN9 12 26#define GFXCORE_FAMILY_GEN9 12
27#define GFXCORE_FAMILY_UNKNOWN 0x7fffffff 27#define GFXCORE_FAMILY_UNKNOWN 0x7fffffff
28 28
29#define GUC_CTX_PRIORITY_KMD_HIGH 0 29#define GUC_CLIENT_PRIORITY_KMD_HIGH 0
30#define GUC_CTX_PRIORITY_HIGH 1 30#define GUC_CLIENT_PRIORITY_HIGH 1
31#define GUC_CTX_PRIORITY_KMD_NORMAL 2 31#define GUC_CLIENT_PRIORITY_KMD_NORMAL 2
32#define GUC_CTX_PRIORITY_NORMAL 3 32#define GUC_CLIENT_PRIORITY_NORMAL 3
33#define GUC_CTX_PRIORITY_NUM 4 33#define GUC_CLIENT_PRIORITY_NUM 4
34 34
35#define GUC_MAX_GPU_CONTEXTS 1024 35#define GUC_MAX_STAGE_DESCRIPTORS 1024
36#define GUC_INVALID_CTX_ID GUC_MAX_GPU_CONTEXTS 36#define GUC_INVALID_STAGE_ID GUC_MAX_STAGE_DESCRIPTORS
37 37
38#define GUC_RENDER_ENGINE 0 38#define GUC_RENDER_ENGINE 0
39#define GUC_VIDEO_ENGINE 1 39#define GUC_VIDEO_ENGINE 1
@@ -68,14 +68,14 @@
68#define GUC_DOORBELL_ENABLED 1 68#define GUC_DOORBELL_ENABLED 1
69#define GUC_DOORBELL_DISABLED 0 69#define GUC_DOORBELL_DISABLED 0
70 70
71#define GUC_CTX_DESC_ATTR_ACTIVE (1 << 0) 71#define GUC_STAGE_DESC_ATTR_ACTIVE BIT(0)
72#define GUC_CTX_DESC_ATTR_PENDING_DB (1 << 1) 72#define GUC_STAGE_DESC_ATTR_PENDING_DB BIT(1)
73#define GUC_CTX_DESC_ATTR_KERNEL (1 << 2) 73#define GUC_STAGE_DESC_ATTR_KERNEL BIT(2)
74#define GUC_CTX_DESC_ATTR_PREEMPT (1 << 3) 74#define GUC_STAGE_DESC_ATTR_PREEMPT BIT(3)
75#define GUC_CTX_DESC_ATTR_RESET (1 << 4) 75#define GUC_STAGE_DESC_ATTR_RESET BIT(4)
76#define GUC_CTX_DESC_ATTR_WQLOCKED (1 << 5) 76#define GUC_STAGE_DESC_ATTR_WQLOCKED BIT(5)
77#define GUC_CTX_DESC_ATTR_PCH (1 << 6) 77#define GUC_STAGE_DESC_ATTR_PCH BIT(6)
78#define GUC_CTX_DESC_ATTR_TERMINATED (1 << 7) 78#define GUC_STAGE_DESC_ATTR_TERMINATED BIT(7)
79 79
80/* The guc control data is 10 DWORDs */ 80/* The guc control data is 10 DWORDs */
81#define GUC_CTL_CTXINFO 0 81#define GUC_CTL_CTXINFO 0
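
The attribute flags above switch from open-coded (1 << n) to the kernel's BIT() macro. For these low bit positions the values are identical, but BIT() shifts an unsigned long, which avoids the signed-overflow trap once a flag reaches bit 31:

/* include/linux/bitops.h (abridged): an unsigned-long shift, so
 * BIT(31) and above stay well defined where (1 << 31) would not. */
#define BIT(nr) (1UL << (nr))
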
@@ -241,8 +241,8 @@ union guc_doorbell_qw {
241 u64 value_qw; 241 u64 value_qw;
242} __packed; 242} __packed;
243 243
244#define GUC_MAX_DOORBELLS 256 244#define GUC_NUM_DOORBELLS 256
245#define GUC_INVALID_DOORBELL_ID (GUC_MAX_DOORBELLS) 245#define GUC_DOORBELL_INVALID (GUC_NUM_DOORBELLS)
246 246
247#define GUC_DB_SIZE (PAGE_SIZE) 247#define GUC_DB_SIZE (PAGE_SIZE)
248#define GUC_WQ_SIZE (PAGE_SIZE * 2) 248#define GUC_WQ_SIZE (PAGE_SIZE * 2)
@@ -251,12 +251,12 @@ union guc_doorbell_qw {
251struct guc_wq_item { 251struct guc_wq_item {
252 u32 header; 252 u32 header;
253 u32 context_desc; 253 u32 context_desc;
254 u32 ring_tail; 254 u32 submit_element_info;
255 u32 fence_id; 255 u32 fence_id;
256} __packed; 256} __packed;
257 257
258struct guc_process_desc { 258struct guc_process_desc {
259 u32 context_id; 259 u32 stage_id;
260 u64 db_base_addr; 260 u64 db_base_addr;
261 u32 head; 261 u32 head;
262 u32 tail; 262 u32 tail;
@@ -278,7 +278,7 @@ struct guc_execlist_context {
278 u32 context_desc; 278 u32 context_desc;
279 u32 context_id; 279 u32 context_id;
280 u32 ring_status; 280 u32 ring_status;
281 u32 ring_lcra; 281 u32 ring_lrca;
282 u32 ring_begin; 282 u32 ring_begin;
283 u32 ring_end; 283 u32 ring_end;
284 u32 ring_next_free_location; 284 u32 ring_next_free_location;
@@ -289,10 +289,18 @@ struct guc_execlist_context {
289 u16 engine_submit_queue_count; 289 u16 engine_submit_queue_count;
290} __packed; 290} __packed;
291 291
292/*Context descriptor for communicating between uKernel and Driver*/ 292/*
293struct guc_context_desc { 293 * This structure describes a stage set arranged for a particular communication
294 * between uKernel (GuC) and Driver (KMD). Technically, this is known as a
295 * "GuC Context descriptor" in the specs, but we use the term "stage descriptor"
296 * to avoid confusion with all the other things already named "context" in the
297 * driver. A static pool of these descriptors is stored inside a GEM object
298 * (stage_desc_pool) which is held for the entire lifetime of our interaction
299 * with the GuC, being allocated before the GuC is loaded with its firmware.
300 */
301struct guc_stage_desc {
294 u32 sched_common_area; 302 u32 sched_common_area;
295 u32 context_id; 303 u32 stage_id;
296 u32 pas_id; 304 u32 pas_id;
297 u8 engines_used; 305 u8 engines_used;
298 u64 db_trigger_cpu; 306 u64 db_trigger_cpu;
@@ -359,7 +367,7 @@ struct guc_policy {
359} __packed; 367} __packed;
360 368
361struct guc_policies { 369struct guc_policies {
362 struct guc_policy policy[GUC_CTX_PRIORITY_NUM][GUC_MAX_ENGINES_NUM]; 370 struct guc_policy policy[GUC_CLIENT_PRIORITY_NUM][GUC_MAX_ENGINES_NUM];
363 371
364 /* In micro seconds. How much time to allow before DPC processing is 372 /* In micro seconds. How much time to allow before DPC processing is
365 * called back via interrupt (to prevent DPC queue drain starving). 373 * called back via interrupt (to prevent DPC queue drain starving).
@@ -401,16 +409,17 @@ struct guc_mmio_regset {
401 u32 number_of_registers; 409 u32 number_of_registers;
402} __packed; 410} __packed;
403 411
412/* MMIO registers that are set as non privileged */
413struct mmio_white_list {
414 u32 mmio_start;
415 u32 offsets[GUC_MMIO_WHITE_LIST_MAX];
416 u32 count;
417} __packed;
418
404struct guc_mmio_reg_state { 419struct guc_mmio_reg_state {
405 struct guc_mmio_regset global_reg; 420 struct guc_mmio_regset global_reg;
406 struct guc_mmio_regset engine_reg[GUC_MAX_ENGINES_NUM]; 421 struct guc_mmio_regset engine_reg[GUC_MAX_ENGINES_NUM];
407 422 struct mmio_white_list white_list[GUC_MAX_ENGINES_NUM];
408 /* MMIO registers that are set as non privileged */
409 struct __packed {
410 u32 mmio_start;
411 u32 offsets[GUC_MMIO_WHITE_LIST_MAX];
412 u32 count;
413 } mmio_white_list[GUC_MAX_ENGINES_NUM];
414} __packed; 423} __packed;
415 424
416/* GuC Additional Data Struct */ 425/* GuC Additional Data Struct */
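
Besides renaming ctx/context to stage and client, the hunk above converts the attribute flags from open-coded (1 << n) to the kernel's BIT() macro. A small sketch of the pattern, assuming BIT() from <linux/bitops.h>; the flag names are illustrative:

	#include <linux/bitops.h>	/* BIT() */
	#include <linux/types.h>

	/* Illustrative flag names; BIT(n) expands to (1UL << (n)). */
	#define STAGE_DESC_ATTR_ACTIVE		BIT(0)
	#define STAGE_DESC_ATTR_PENDING_DB	BIT(1)
	#define STAGE_DESC_ATTR_KERNEL		BIT(2)

	static inline bool stage_desc_is_active(u32 attr)
	{
		return attr & STAGE_DESC_ATTR_ACTIVE;
	}

The generated code is identical; the gain is readability plus avoiding the classic (1 << 31) signed-shift pitfall, since BIT() shifts an unsigned long.
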
diff --git a/drivers/gpu/drm/i915/intel_guc_loader.c b/drivers/gpu/drm/i915/intel_guc_loader.c
index 2f270d02894c..8a1a023e48b2 100644
--- a/drivers/gpu/drm/i915/intel_guc_loader.c
+++ b/drivers/gpu/drm/i915/intel_guc_loader.c
@@ -73,22 +73,6 @@ MODULE_FIRMWARE(I915_BXT_GUC_UCODE);
73#define I915_KBL_GUC_UCODE GUC_FW_PATH(kbl, KBL_FW_MAJOR, KBL_FW_MINOR) 73#define I915_KBL_GUC_UCODE GUC_FW_PATH(kbl, KBL_FW_MAJOR, KBL_FW_MINOR)
74MODULE_FIRMWARE(I915_KBL_GUC_UCODE); 74MODULE_FIRMWARE(I915_KBL_GUC_UCODE);
75 75
76/* User-friendly representation of an enum */
77const char *intel_uc_fw_status_repr(enum intel_uc_fw_status status)
78{
79 switch (status) {
80 case INTEL_UC_FIRMWARE_FAIL:
81 return "FAIL";
82 case INTEL_UC_FIRMWARE_NONE:
83 return "NONE";
84 case INTEL_UC_FIRMWARE_PENDING:
85 return "PENDING";
86 case INTEL_UC_FIRMWARE_SUCCESS:
87 return "SUCCESS";
88 default:
89 return "UNKNOWN!";
90 }
91};
92 76
93static u32 get_gttype(struct drm_i915_private *dev_priv) 77static u32 get_gttype(struct drm_i915_private *dev_priv)
94{ 78{
@@ -148,16 +132,14 @@ static void guc_params_init(struct drm_i915_private *dev_priv)
148 } else 132 } else
149 params[GUC_CTL_DEBUG] = GUC_LOG_DISABLED; 133 params[GUC_CTL_DEBUG] = GUC_LOG_DISABLED;
150 134
151 if (guc->ads_vma) { 135 /* If GuC submission is enabled, set up additional parameters here */
136 if (i915.enable_guc_submission) {
152 u32 ads = guc_ggtt_offset(guc->ads_vma) >> PAGE_SHIFT; 137 u32 ads = guc_ggtt_offset(guc->ads_vma) >> PAGE_SHIFT;
138 u32 pgs = guc_ggtt_offset(dev_priv->guc.stage_desc_pool);
139 u32 ctx_in_16 = GUC_MAX_STAGE_DESCRIPTORS / 16;
140
153 params[GUC_CTL_DEBUG] |= ads << GUC_ADS_ADDR_SHIFT; 141 params[GUC_CTL_DEBUG] |= ads << GUC_ADS_ADDR_SHIFT;
154 params[GUC_CTL_DEBUG] |= GUC_ADS_ENABLED; 142 params[GUC_CTL_DEBUG] |= GUC_ADS_ENABLED;
155 }
156
157 /* If GuC submission is enabled, set up additional parameters here */
158 if (i915.enable_guc_submission) {
159 u32 pgs = guc_ggtt_offset(dev_priv->guc.ctx_pool_vma);
160 u32 ctx_in_16 = GUC_MAX_GPU_CONTEXTS / 16;
161 143
162 pgs >>= PAGE_SHIFT; 144 pgs >>= PAGE_SHIFT;
163 params[GUC_CTL_CTXINFO] = (pgs << GUC_CTL_BASE_ADDR_SHIFT) | 145 params[GUC_CTL_CTXINFO] = (pgs << GUC_CTL_BASE_ADDR_SHIFT) |
@@ -430,24 +412,3 @@ int intel_guc_select_fw(struct intel_guc *guc)
430 412
431 return 0; 413 return 0;
432} 414}
433
434/**
435 * intel_guc_fini() - clean up all allocated resources
436 * @dev_priv: i915 device private
437 */
438void intel_guc_fini(struct drm_i915_private *dev_priv)
439{
440 struct intel_uc_fw *guc_fw = &dev_priv->guc.fw;
441 struct drm_i915_gem_object *obj;
442
443 mutex_lock(&dev_priv->drm.struct_mutex);
444 i915_guc_submission_disable(dev_priv);
445 i915_guc_submission_fini(dev_priv);
446 mutex_unlock(&dev_priv->drm.struct_mutex);
447
448 obj = fetch_and_zero(&guc_fw->obj);
449 if (obj)
450 i915_gem_object_put(obj);
451
452 guc_fw->fetch_status = INTEL_UC_FIRMWARE_NONE;
453}
diff --git a/drivers/gpu/drm/i915/intel_guc_log.c b/drivers/gpu/drm/i915/intel_guc_log.c
index 5c0f9a49da0e..6fb63a3c65b0 100644
--- a/drivers/gpu/drm/i915/intel_guc_log.c
+++ b/drivers/gpu/drm/i915/intel_guc_log.c
@@ -66,7 +66,6 @@ static int guc_log_control(struct intel_guc *guc, u32 control_val)
66 return intel_guc_send(guc, action, ARRAY_SIZE(action)); 66 return intel_guc_send(guc, action, ARRAY_SIZE(action));
67} 67}
68 68
69
70/* 69/*
71 * Sub buffer switch callback. Called whenever relay has to switch to a new 70 * Sub buffer switch callback. Called whenever relay has to switch to a new
72 * sub buffer, relay stays on the same sub buffer if 0 is returned. 71 * sub buffer, relay stays on the same sub buffer if 0 is returned.
@@ -139,45 +138,15 @@ static struct rchan_callbacks relay_callbacks = {
139 .remove_buf_file = remove_buf_file_callback, 138 .remove_buf_file = remove_buf_file_callback,
140}; 139};
141 140
142static void guc_log_remove_relay_file(struct intel_guc *guc) 141static int guc_log_relay_file_create(struct intel_guc *guc)
143{
144 relay_close(guc->log.relay_chan);
145}
146
147static int guc_log_create_relay_channel(struct intel_guc *guc)
148{
149 struct drm_i915_private *dev_priv = guc_to_i915(guc);
150 struct rchan *guc_log_relay_chan;
151 size_t n_subbufs, subbuf_size;
152
153 /* Keep the size of sub buffers same as shared log buffer */
154 subbuf_size = guc->log.vma->obj->base.size;
155
156 /* Store up to 8 snapshots, which is large enough to buffer sufficient
157 * boot time logs and provides enough leeway to User, in terms of
158 * latency, for consuming the logs from relay. Also doesn't take
159 * up too much memory.
160 */
161 n_subbufs = 8;
162
163 guc_log_relay_chan = relay_open(NULL, NULL, subbuf_size,
164 n_subbufs, &relay_callbacks, dev_priv);
165 if (!guc_log_relay_chan) {
166 DRM_ERROR("Couldn't create relay chan for GuC logging\n");
167 return -ENOMEM;
168 }
169
170 GEM_BUG_ON(guc_log_relay_chan->subbuf_size < subbuf_size);
171 guc->log.relay_chan = guc_log_relay_chan;
172 return 0;
173}
174
175static int guc_log_create_relay_file(struct intel_guc *guc)
176{ 142{
177 struct drm_i915_private *dev_priv = guc_to_i915(guc); 143 struct drm_i915_private *dev_priv = guc_to_i915(guc);
178 struct dentry *log_dir; 144 struct dentry *log_dir;
179 int ret; 145 int ret;
180 146
147 if (i915.guc_log_level < 0)
148 return 0;
149
181 /* For now create the log file in /sys/kernel/debug/dri/0 dir */ 150 /* For now create the log file in /sys/kernel/debug/dri/0 dir */
182 log_dir = dev_priv->drm.primary->debugfs_root; 151 log_dir = dev_priv->drm.primary->debugfs_root;
183 152
@@ -197,8 +166,8 @@ static int guc_log_create_relay_file(struct intel_guc *guc)
197 return -ENODEV; 166 return -ENODEV;
198 } 167 }
199 168
200 ret = relay_late_setup_files(guc->log.relay_chan, "guc_log", log_dir); 169 ret = relay_late_setup_files(guc->log.runtime.relay_chan, "guc_log", log_dir);
201 if (ret) { 170 if (ret < 0 && ret != -EEXIST) {
202 DRM_ERROR("Couldn't associate relay chan with file %d\n", ret); 171 DRM_ERROR("Couldn't associate relay chan with file %d\n", ret);
203 return ret; 172 return ret;
204 } 173 }
@@ -214,15 +183,15 @@ static void guc_move_to_next_buf(struct intel_guc *guc)
214 smp_wmb(); 183 smp_wmb();
215 184
216 /* All data has been written, so now move the offset of sub buffer. */ 185 /* All data has been written, so now move the offset of sub buffer. */
217 relay_reserve(guc->log.relay_chan, guc->log.vma->obj->base.size); 186 relay_reserve(guc->log.runtime.relay_chan, guc->log.vma->obj->base.size);
218 187
219 /* Switch to the next sub buffer */ 188 /* Switch to the next sub buffer */
220 relay_flush(guc->log.relay_chan); 189 relay_flush(guc->log.runtime.relay_chan);
221} 190}
222 191
223static void *guc_get_write_buffer(struct intel_guc *guc) 192static void *guc_get_write_buffer(struct intel_guc *guc)
224{ 193{
225 if (!guc->log.relay_chan) 194 if (!guc->log.runtime.relay_chan)
226 return NULL; 195 return NULL;
227 196
228 /* Just get the base address of a new sub buffer and copy data into it 197 /* Just get the base address of a new sub buffer and copy data into it
@@ -233,7 +202,7 @@ static void *guc_get_write_buffer(struct intel_guc *guc)
233 * done without using relay_reserve() along with relay_write(). So its 202 * done without using relay_reserve() along with relay_write(). So its
234 * better to use relay_reserve() alone. 203 * better to use relay_reserve() alone.
235 */ 204 */
236 return relay_reserve(guc->log.relay_chan, 0); 205 return relay_reserve(guc->log.runtime.relay_chan, 0);
237} 206}
238 207
239static bool guc_check_log_buf_overflow(struct intel_guc *guc, 208static bool guc_check_log_buf_overflow(struct intel_guc *guc,
@@ -284,11 +253,11 @@ static void guc_read_update_log_buffer(struct intel_guc *guc)
284 void *src_data, *dst_data; 253 void *src_data, *dst_data;
285 bool new_overflow; 254 bool new_overflow;
286 255
287 if (WARN_ON(!guc->log.buf_addr)) 256 if (WARN_ON(!guc->log.runtime.buf_addr))
288 return; 257 return;
289 258
290 /* Get the pointer to shared GuC log buffer */ 259 /* Get the pointer to shared GuC log buffer */
291 log_buf_state = src_data = guc->log.buf_addr; 260 log_buf_state = src_data = guc->log.runtime.buf_addr;
292 261
293 /* Get the pointer to local buffer to store the logs */ 262 /* Get the pointer to local buffer to store the logs */
294 log_buf_snapshot_state = dst_data = guc_get_write_buffer(guc); 263 log_buf_snapshot_state = dst_data = guc_get_write_buffer(guc);
@@ -371,153 +340,113 @@ static void guc_read_update_log_buffer(struct intel_guc *guc)
371 } 340 }
372} 341}
373 342
374static void guc_log_cleanup(struct intel_guc *guc)
375{
376 struct drm_i915_private *dev_priv = guc_to_i915(guc);
377
378 lockdep_assert_held(&dev_priv->drm.struct_mutex);
379
380 /* First disable the flush interrupt */
381 gen9_disable_guc_interrupts(dev_priv);
382
383 if (guc->log.flush_wq)
384 destroy_workqueue(guc->log.flush_wq);
385
386 guc->log.flush_wq = NULL;
387
388 if (guc->log.relay_chan)
389 guc_log_remove_relay_file(guc);
390
391 guc->log.relay_chan = NULL;
392
393 if (guc->log.buf_addr)
394 i915_gem_object_unpin_map(guc->log.vma->obj);
395
396 guc->log.buf_addr = NULL;
397}
398
399static void capture_logs_work(struct work_struct *work) 343static void capture_logs_work(struct work_struct *work)
400{ 344{
401 struct intel_guc *guc = 345 struct intel_guc *guc =
402 container_of(work, struct intel_guc, log.flush_work); 346 container_of(work, struct intel_guc, log.runtime.flush_work);
403 347
404 guc_log_capture_logs(guc); 348 guc_log_capture_logs(guc);
405} 349}
406 350
407static int guc_log_create_extras(struct intel_guc *guc) 351static bool guc_log_has_runtime(struct intel_guc *guc)
352{
353 return guc->log.runtime.buf_addr != NULL;
354}
355
356static int guc_log_runtime_create(struct intel_guc *guc)
408{ 357{
409 struct drm_i915_private *dev_priv = guc_to_i915(guc); 358 struct drm_i915_private *dev_priv = guc_to_i915(guc);
410 void *vaddr; 359 void *vaddr;
411 int ret; 360 struct rchan *guc_log_relay_chan;
361 size_t n_subbufs, subbuf_size;
362 int ret = 0;
412 363
413 lockdep_assert_held(&dev_priv->drm.struct_mutex); 364 lockdep_assert_held(&dev_priv->drm.struct_mutex);
414 365
415 /* Nothing to do */ 366 GEM_BUG_ON(guc_log_has_runtime(guc));
416 if (i915.guc_log_level < 0)
417 return 0;
418
419 if (!guc->log.buf_addr) {
420 /* Create a WC (Uncached for read) vmalloc mapping of log
421 * buffer pages, so that we can directly get the data
422 * (up-to-date) from memory.
423 */
424 vaddr = i915_gem_object_pin_map(guc->log.vma->obj, I915_MAP_WC);
425 if (IS_ERR(vaddr)) {
426 ret = PTR_ERR(vaddr);
427 DRM_ERROR("Couldn't map log buffer pages %d\n", ret);
428 return ret;
429 }
430 367
431 guc->log.buf_addr = vaddr; 368 /* Create a WC (Uncached for read) vmalloc mapping of log
369 * buffer pages, so that we can directly get the data
370 * (up-to-date) from memory.
371 */
372 vaddr = i915_gem_object_pin_map(guc->log.vma->obj, I915_MAP_WC);
373 if (IS_ERR(vaddr)) {
374 DRM_ERROR("Couldn't map log buffer pages %d\n", ret);
375 return PTR_ERR(vaddr);
432 } 376 }
433 377
434 if (!guc->log.relay_chan) { 378 guc->log.runtime.buf_addr = vaddr;
435 /* Create a relay channel, so that we have buffers for storing
436 * the GuC firmware logs, the channel will be linked with a file
437 * later on when debugfs is registered.
438 */
439 ret = guc_log_create_relay_channel(guc);
440 if (ret)
441 return ret;
442 }
443 379
444 if (!guc->log.flush_wq) { 380 /* Keep the size of sub buffers same as shared log buffer */
445 INIT_WORK(&guc->log.flush_work, capture_logs_work); 381 subbuf_size = guc->log.vma->obj->base.size;
446
447 /*
448 * GuC log buffer flush work item has to do register access to
449 * send the ack to GuC and this work item, if not synced before
450 * suspend, can potentially get executed after the GFX device is
451 * suspended.
452 * By marking the WQ as freezable, we don't have to bother about
453 * flushing of this work item from the suspend hooks, the pending
454 * work item if any will be either executed before the suspend
455 * or scheduled later on resume. This way the handling of work
456 * item can be kept same between system suspend & rpm suspend.
457 */
458 guc->log.flush_wq = alloc_ordered_workqueue("i915-guc_log",
459 WQ_HIGHPRI | WQ_FREEZABLE);
460 if (guc->log.flush_wq == NULL) {
461 DRM_ERROR("Couldn't allocate the wq for GuC logging\n");
462 return -ENOMEM;
463 }
464 }
465
466 return 0;
467}
468
469void intel_guc_log_create(struct intel_guc *guc)
470{
471 struct i915_vma *vma;
472 unsigned long offset;
473 uint32_t size, flags;
474 382
475 if (i915.guc_log_level > GUC_LOG_VERBOSITY_MAX) 383 /* Store up to 8 snapshots, which is large enough to buffer sufficient
476 i915.guc_log_level = GUC_LOG_VERBOSITY_MAX; 384 * boot time logs and provides enough leeway to User, in terms of
385 * latency, for consuming the logs from relay. Also doesn't take
386 * up too much memory.
387 */
388 n_subbufs = 8;
477 389
478 /* The first page is to save log buffer state. Allocate one 390 /* Create a relay channel, so that we have buffers for storing
479 * extra page for others in case for overlap */ 391 * the GuC firmware logs, the channel will be linked with a file
480 size = (1 + GUC_LOG_DPC_PAGES + 1 + 392 * later on when debugfs is registered.
481 GUC_LOG_ISR_PAGES + 1 + 393 */
482 GUC_LOG_CRASH_PAGES + 1) << PAGE_SHIFT; 394 guc_log_relay_chan = relay_open(NULL, NULL, subbuf_size,
395 n_subbufs, &relay_callbacks, dev_priv);
396 if (!guc_log_relay_chan) {
397 DRM_ERROR("Couldn't create relay chan for GuC logging\n");
483 398
484 vma = guc->log.vma; 399 ret = -ENOMEM;
485 if (!vma) { 400 goto err_vaddr;
486 /* We require SSE 4.1 for fast reads from the GuC log buffer and 401 }
487 * it should be present on the chipsets supporting GuC based
488 * submissions.
489 */
490 if (WARN_ON(!i915_has_memcpy_from_wc())) {
491 /* logging will not be enabled */
492 i915.guc_log_level = -1;
493 return;
494 }
495 402
496 vma = intel_guc_allocate_vma(guc, size); 403 GEM_BUG_ON(guc_log_relay_chan->subbuf_size < subbuf_size);
497 if (IS_ERR(vma)) { 404 guc->log.runtime.relay_chan = guc_log_relay_chan;
498 /* logging will be off */ 405
499 i915.guc_log_level = -1; 406 INIT_WORK(&guc->log.runtime.flush_work, capture_logs_work);
500 return; 407
501 } 408 /*
409 * GuC log buffer flush work item has to do register access to
410 * send the ack to GuC and this work item, if not synced before
411 * suspend, can potentially get executed after the GFX device is
412 * suspended.
413 * By marking the WQ as freezable, we don't have to bother about
414 * flushing of this work item from the suspend hooks, the pending
415 * work item if any will be either executed before the suspend
416 * or scheduled later on resume. This way the handling of work
417 * item can be kept same between system suspend & rpm suspend.
418 */
419 guc->log.runtime.flush_wq = alloc_ordered_workqueue("i915-guc_log",
420 WQ_HIGHPRI | WQ_FREEZABLE);
421 if (!guc->log.runtime.flush_wq) {
422 DRM_ERROR("Couldn't allocate the wq for GuC logging\n");
423 ret = -ENOMEM;
424 goto err_relaychan;
425 }
502 426
503 guc->log.vma = vma; 427 return 0;
504 428
505 if (guc_log_create_extras(guc)) { 429err_relaychan:
506 guc_log_cleanup(guc); 430 relay_close(guc->log.runtime.relay_chan);
507 i915_vma_unpin_and_release(&guc->log.vma); 431err_vaddr:
508 i915.guc_log_level = -1; 432 i915_gem_object_unpin_map(guc->log.vma->obj);
509 return; 433 guc->log.runtime.buf_addr = NULL;
510 } 434 return ret;
511 } 435}
512 436
513 /* each allocated unit is a page */ 437static void guc_log_runtime_destroy(struct intel_guc *guc)
514 flags = GUC_LOG_VALID | GUC_LOG_NOTIFY_ON_HALF_FULL | 438{
515 (GUC_LOG_DPC_PAGES << GUC_LOG_DPC_SHIFT) | 439 /*
516 (GUC_LOG_ISR_PAGES << GUC_LOG_ISR_SHIFT) | 440 * It's possible that the runtime stuff was never allocated because
517 (GUC_LOG_CRASH_PAGES << GUC_LOG_CRASH_SHIFT); 441 * guc_log_level was < 0 at the time
442 **/
443 if (!guc_log_has_runtime(guc))
444 return;
518 445
519 offset = guc_ggtt_offset(vma) >> PAGE_SHIFT; /* in pages */ 446 destroy_workqueue(guc->log.runtime.flush_wq);
520 guc->log.flags = (offset << GUC_LOG_BUF_ADDR_SHIFT) | flags; 447 relay_close(guc->log.runtime.relay_chan);
448 i915_gem_object_unpin_map(guc->log.vma->obj);
449 guc->log.runtime.buf_addr = NULL;
521} 450}
522 451
523static int guc_log_late_setup(struct intel_guc *guc) 452static int guc_log_late_setup(struct intel_guc *guc)
@@ -527,24 +456,25 @@ static int guc_log_late_setup(struct intel_guc *guc)
527 456
528 lockdep_assert_held(&dev_priv->drm.struct_mutex); 457 lockdep_assert_held(&dev_priv->drm.struct_mutex);
529 458
530 if (i915.guc_log_level < 0) 459 if (!guc_log_has_runtime(guc)) {
531 return -EINVAL; 460 /* If log_level was set as -1 at boot time, then setup needed to
532 461 * handle log buffer flush interrupts would not have been done yet,
533 /* If log_level was set as -1 at boot time, then setup needed to 462 * so do that now.
534 * handle log buffer flush interrupts would not have been done yet, 463 */
535 * so do that now. 464 ret = guc_log_runtime_create(guc);
536 */ 465 if (ret)
537 ret = guc_log_create_extras(guc); 466 goto err;
538 if (ret) 467 }
539 goto err;
540 468
541 ret = guc_log_create_relay_file(guc); 469 ret = guc_log_relay_file_create(guc);
542 if (ret) 470 if (ret)
543 goto err; 471 goto err_runtime;
544 472
545 return 0; 473 return 0;
474
475err_runtime:
476 guc_log_runtime_destroy(guc);
546err: 477err:
547 guc_log_cleanup(guc);
548 /* logging will remain off */ 478 /* logging will remain off */
549 i915.guc_log_level = -1; 479 i915.guc_log_level = -1;
550 return ret; 480 return ret;
@@ -577,7 +507,7 @@ static void guc_flush_logs(struct intel_guc *guc)
577 /* Before initiating the forceful flush, wait for any pending/ongoing 507 /* Before initiating the forceful flush, wait for any pending/ongoing
578 * flush to complete otherwise forceful flush may not actually happen. 508 * flush to complete otherwise forceful flush may not actually happen.
579 */ 509 */
580 flush_work(&guc->log.flush_work); 510 flush_work(&guc->log.runtime.flush_work);
581 511
582 /* Ask GuC to update the log buffer state */ 512 /* Ask GuC to update the log buffer state */
583 guc_log_flush(guc); 513 guc_log_flush(guc);
@@ -586,6 +516,72 @@ static void guc_flush_logs(struct intel_guc *guc)
586 guc_log_capture_logs(guc); 516 guc_log_capture_logs(guc);
587} 517}
588 518
519int intel_guc_log_create(struct intel_guc *guc)
520{
521 struct i915_vma *vma;
522 unsigned long offset;
523 uint32_t size, flags;
524 int ret;
525
526 GEM_BUG_ON(guc->log.vma);
527
528 if (i915.guc_log_level > GUC_LOG_VERBOSITY_MAX)
529 i915.guc_log_level = GUC_LOG_VERBOSITY_MAX;
530
531 /* The first page is to save log buffer state. Allocate one
532 * extra page for others in case for overlap */
533 size = (1 + GUC_LOG_DPC_PAGES + 1 +
534 GUC_LOG_ISR_PAGES + 1 +
535 GUC_LOG_CRASH_PAGES + 1) << PAGE_SHIFT;
536
537 /* We require SSE 4.1 for fast reads from the GuC log buffer and
538 * it should be present on the chipsets supporting GuC based
539 * submissions.
540 */
541 if (WARN_ON(!i915_has_memcpy_from_wc())) {
542 ret = -EINVAL;
543 goto err;
544 }
545
546 vma = intel_guc_allocate_vma(guc, size);
547 if (IS_ERR(vma)) {
548 ret = PTR_ERR(vma);
549 goto err;
550 }
551
552 guc->log.vma = vma;
553
554 if (i915.guc_log_level >= 0) {
555 ret = guc_log_runtime_create(guc);
556 if (ret < 0)
557 goto err_vma;
558 }
559
560 /* each allocated unit is a page */
561 flags = GUC_LOG_VALID | GUC_LOG_NOTIFY_ON_HALF_FULL |
562 (GUC_LOG_DPC_PAGES << GUC_LOG_DPC_SHIFT) |
563 (GUC_LOG_ISR_PAGES << GUC_LOG_ISR_SHIFT) |
564 (GUC_LOG_CRASH_PAGES << GUC_LOG_CRASH_SHIFT);
565
566 offset = guc_ggtt_offset(vma) >> PAGE_SHIFT; /* in pages */
567 guc->log.flags = (offset << GUC_LOG_BUF_ADDR_SHIFT) | flags;
568
569 return 0;
570
571err_vma:
572 i915_vma_unpin_and_release(&guc->log.vma);
573err:
574 /* logging will be off */
575 i915.guc_log_level = -1;
576 return ret;
577}
578
579void intel_guc_log_destroy(struct intel_guc *guc)
580{
581 guc_log_runtime_destroy(guc);
582 i915_vma_unpin_and_release(&guc->log.vma);
583}
584
589int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val) 585int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val)
590{ 586{
591 struct intel_guc *guc = &dev_priv->guc; 587 struct intel_guc *guc = &dev_priv->guc;
@@ -609,17 +605,22 @@ int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val)
609 return ret; 605 return ret;
610 } 606 }
611 607
612 i915.guc_log_level = log_param.verbosity; 608 if (log_param.logging_enabled) {
609 i915.guc_log_level = log_param.verbosity;
613 610
614 /* If log_level was set as -1 at boot time, then the relay channel file 611 /* If log_level was set as -1 at boot time, then the relay channel file
615 * wouldn't have been created by now and interrupts also would not have 612 * wouldn't have been created by now and interrupts also would not have
616 * been enabled. 613 * been enabled. Try again now, just in case.
617 */ 614 */
618 if (!dev_priv->guc.log.relay_chan) {
619 ret = guc_log_late_setup(guc); 615 ret = guc_log_late_setup(guc);
620 if (!ret) 616 if (ret < 0) {
621 gen9_enable_guc_interrupts(dev_priv); 617 DRM_DEBUG_DRIVER("GuC log late setup failed %d\n", ret);
622 } else if (!log_param.logging_enabled) { 618 return ret;
619 }
620
621 /* GuC logging is currently the only user of Guc2Host interrupts */
622 gen9_enable_guc_interrupts(dev_priv);
623 } else {
623 /* Once logging is disabled, GuC won't generate logs & send an 624 /* Once logging is disabled, GuC won't generate logs & send an
624 * interrupt. But there could be some data in the log buffer 625 * interrupt. But there could be some data in the log buffer
625 * which is yet to be captured. So request GuC to update the log 626 * which is yet to be captured. So request GuC to update the log
@@ -629,9 +630,6 @@ int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val)
629 630
630 /* As logging is disabled, update log level to reflect that */ 631 /* As logging is disabled, update log level to reflect that */
631 i915.guc_log_level = -1; 632 i915.guc_log_level = -1;
632 } else {
633 /* In case interrupts were disabled, enable them now */
634 gen9_enable_guc_interrupts(dev_priv);
635 } 633 }
636 634
637 return ret; 635 return ret;
@@ -639,7 +637,7 @@ int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val)
639 637
640void i915_guc_log_register(struct drm_i915_private *dev_priv) 638void i915_guc_log_register(struct drm_i915_private *dev_priv)
641{ 639{
642 if (!i915.enable_guc_submission) 640 if (!i915.enable_guc_submission || i915.guc_log_level < 0)
643 return; 641 return;
644 642
645 mutex_lock(&dev_priv->drm.struct_mutex); 643 mutex_lock(&dev_priv->drm.struct_mutex);
@@ -653,6 +651,8 @@ void i915_guc_log_unregister(struct drm_i915_private *dev_priv)
653 return; 651 return;
654 652
655 mutex_lock(&dev_priv->drm.struct_mutex); 653 mutex_lock(&dev_priv->drm.struct_mutex);
656 guc_log_cleanup(&dev_priv->guc); 654 /* GuC logging is currently the only user of Guc2Host interrupts */
655 gen9_disable_guc_interrupts(dev_priv);
656 guc_log_runtime_destroy(&dev_priv->guc);
657 mutex_unlock(&dev_priv->drm.struct_mutex); 657 mutex_unlock(&dev_priv->drm.struct_mutex);
658} 658}
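
The restructuring above gathers everything that only exists while logging is live (the WC mapping, the relay channel, the flush workqueue) into guc_log_runtime_create()/guc_log_runtime_destroy(), with unwind labels on the failure path. A compressed sketch of just the relay pairing, using the in-kernel relay API; the callbacks, sizes and function names here are placeholders, not the i915 values:

	#include <linux/relay.h>

	/* 8 sub-buffers of 64 KiB echoes the comment above, not any API
	 * requirement; 'cbs'/'priv' stand in for the driver's
	 * rchan_callbacks and private data. */
	static struct rchan *log_relay_create(struct rchan_callbacks *cbs,
					      void *priv)
	{
		/* NULL name/parent: the debugfs file is attached later
		 * with relay_late_setup_files(), as in the code above. */
		return relay_open(NULL, NULL, 65536 /* subbuf size */,
				  8 /* n_subbufs */, cbs, priv);
	}

	static void log_relay_destroy(struct rchan *chan)
	{
		if (chan)
			relay_close(chan);
	}
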
diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c
index 8c04eca84351..e1ab6432a914 100644
--- a/drivers/gpu/drm/i915/intel_gvt.c
+++ b/drivers/gpu/drm/i915/intel_gvt.c
@@ -45,6 +45,8 @@ static bool is_supported_device(struct drm_i915_private *dev_priv)
45 return true; 45 return true;
46 if (IS_SKYLAKE(dev_priv)) 46 if (IS_SKYLAKE(dev_priv))
47 return true; 47 return true;
48 if (IS_KABYLAKE(dev_priv) && INTEL_DEVID(dev_priv) == 0x591D)
49 return true;
48 return false; 50 return false;
49} 51}
50 52
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 3eec74ca5116..1d623b5e09d6 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -34,6 +34,7 @@
34#include <drm/drm_atomic_helper.h> 34#include <drm/drm_atomic_helper.h>
35#include <drm/drm_crtc.h> 35#include <drm/drm_crtc.h>
36#include <drm/drm_edid.h> 36#include <drm/drm_edid.h>
37#include <drm/drm_scdc_helper.h>
37#include "intel_drv.h" 38#include "intel_drv.h"
38#include <drm/i915_drm.h> 39#include <drm/i915_drm.h>
39#include <drm/intel_lpe_audio.h> 40#include <drm/intel_lpe_audio.h>
@@ -1208,6 +1209,8 @@ static int intel_hdmi_source_max_tmds_clock(struct drm_i915_private *dev_priv)
1208{ 1209{
1209 if (IS_G4X(dev_priv)) 1210 if (IS_G4X(dev_priv))
1210 return 165000; 1211 return 165000;
1212 else if (IS_GEMINILAKE(dev_priv))
1213 return 594000;
1211 else if (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8) 1214 else if (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)
1212 return 300000; 1215 return 300000;
1213 else 1216 else
@@ -1334,6 +1337,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
1334 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); 1337 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
1335 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 1338 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1336 struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; 1339 struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
1340 struct drm_scdc *scdc = &conn_state->connector->display_info.hdmi.scdc;
1337 int clock_8bpc = pipe_config->base.adjusted_mode.crtc_clock; 1341 int clock_8bpc = pipe_config->base.adjusted_mode.crtc_clock;
1338 int clock_12bpc = clock_8bpc * 3 / 2; 1342 int clock_12bpc = clock_8bpc * 3 / 2;
1339 int desired_bpp; 1343 int desired_bpp;
@@ -1403,6 +1407,16 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
1403 1407
1404 pipe_config->lane_count = 4; 1408 pipe_config->lane_count = 4;
1405 1409
1410 if (scdc->scrambling.supported && IS_GEMINILAKE(dev_priv)) {
1411 if (scdc->scrambling.low_rates)
1412 pipe_config->hdmi_scrambling = true;
1413
1414 if (pipe_config->port_clock > 340000) {
1415 pipe_config->hdmi_scrambling = true;
1416 pipe_config->hdmi_high_tmds_clock_ratio = true;
1417 }
1418 }
1419
1406 return true; 1420 return true;
1407} 1421}
1408 1422
@@ -1812,6 +1826,57 @@ intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *c
1812 intel_hdmi->aspect_ratio = HDMI_PICTURE_ASPECT_NONE; 1826 intel_hdmi->aspect_ratio = HDMI_PICTURE_ASPECT_NONE;
1813} 1827}
1814 1828
1829/*
1830 * intel_hdmi_handle_sink_scrambling: handle sink scrambling/clock ratio setup
1831 * @encoder: intel_encoder
1832 * @connector: drm_connector
1833 * @high_tmds_clock_ratio: bool to indicate if the function needs to set
1834 * or reset the high tmds clock ratio for scrambling
1835 * @scrambling: bool to indicate if the function needs to set or reset
1836 * sink scrambling
1837 *
1838 * This function handles scrambling on HDMI 2.0 capable sinks.
1839 * If the required clock rate is > 340 MHz && scrambling is supported by the sink
1840 * it enables scrambling. This should be called before enabling the HDMI
1841 * 2.0 port, as the sink can choose to disable the scrambling if it doesn't
1842 * detect a scrambled clock within 100 ms.
1843 */
1844void intel_hdmi_handle_sink_scrambling(struct intel_encoder *encoder,
1845 struct drm_connector *connector,
1846 bool high_tmds_clock_ratio,
1847 bool scrambling)
1848{
1849 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
1850 struct drm_i915_private *dev_priv = connector->dev->dev_private;
1851 struct drm_scrambling *sink_scrambling =
1852 &connector->display_info.hdmi.scdc.scrambling;
1853 struct i2c_adapter *adptr = intel_gmbus_get_adapter(dev_priv,
1854 intel_hdmi->ddc_bus);
1855 bool ret;
1856
1857 if (!sink_scrambling->supported)
1858 return;
1859
1860 DRM_DEBUG_KMS("Setting sink scrambling for enc:%s connector:%s\n",
1861 encoder->base.name, connector->name);
1862
1863 /* Set TMDS bit clock ratio to 1/40 or 1/10 */
1864 ret = drm_scdc_set_high_tmds_clock_ratio(adptr, high_tmds_clock_ratio);
1865 if (!ret) {
1866 DRM_ERROR("Set TMDS ratio failed\n");
1867 return;
1868 }
1869
1870 /* Enable/disable sink scrambling */
1871 ret = drm_scdc_set_scrambling(adptr, scrambling);
1872 if (!ret) {
1873 DRM_ERROR("Set sink scrambling failed\n");
1874 return;
1875 }
1876
1877 DRM_DEBUG_KMS("sink scrambling handled\n");
1878}
1879
1815static u8 intel_hdmi_ddc_pin(struct drm_i915_private *dev_priv, 1880static u8 intel_hdmi_ddc_pin(struct drm_i915_private *dev_priv,
1816 enum port port) 1881 enum port port)
1817{ 1882{
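
intel_hdmi_handle_sink_scrambling() above drives the sink side of HDMI 2.0 scrambling through SCDC in a fixed order: TMDS bit clock ratio first, then scrambling. A minimal sketch of that ordering using the drm_scdc helpers; the wrapper name and error handling are illustrative, not the i915 flow:

	#include <drm/drm_scdc_helper.h>
	#include <linux/i2c.h>
	#include <linux/errno.h>

	static int hdmi2_setup_sink(struct i2c_adapter *ddc, bool above_340mhz)
	{
		/* TMDS bit clock ratio: 1/40 above 340 MHz, 1/10 below. */
		if (!drm_scdc_set_high_tmds_clock_ratio(ddc, above_340mhz))
			return -EIO;

		/* Then enable scrambling over the same DDC/SCDC channel. */
		if (!drm_scdc_set_scrambling(ddc, true))
			return -EIO;

		return 0;
	}

The order matters: per the comment above, the sink may disable scrambling on its own if it does not detect a scrambled clock within 100 ms, which is why this runs before the HDMI 2.0 port is enabled.
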
diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c
index 7d210097eefa..f1200272a699 100644
--- a/drivers/gpu/drm/i915/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/intel_hotplug.c
@@ -243,7 +243,8 @@ static bool intel_hpd_irq_event(struct drm_device *dev,
243 WARN_ON(!mutex_is_locked(&dev->mode_config.mutex)); 243 WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
244 old_status = connector->status; 244 old_status = connector->status;
245 245
246 connector->status = connector->funcs->detect(connector, false); 246 connector->status = drm_helper_probe_detect(connector, NULL, false);
247
247 if (old_status == connector->status) 248 if (old_status == connector->status)
248 return false; 249 return false;
249 250
diff --git a/drivers/gpu/drm/i915/intel_huc.c b/drivers/gpu/drm/i915/intel_huc.c
index 7af900bcdc05..9ee819666a4c 100644
--- a/drivers/gpu/drm/i915/intel_huc.c
+++ b/drivers/gpu/drm/i915/intel_huc.c
@@ -251,24 +251,6 @@ fail:
251} 251}
252 252
253/** 253/**
254 * intel_huc_fini() - clean up resources allocated for HuC
255 * @dev_priv: the drm_i915_private device
256 *
257 * Cleans up by releasing the huc firmware GEM obj.
258 */
259void intel_huc_fini(struct drm_i915_private *dev_priv)
260{
261 struct intel_uc_fw *huc_fw = &dev_priv->huc.fw;
262 struct drm_i915_gem_object *obj;
263
264 obj = fetch_and_zero(&huc_fw->obj);
265 if (obj)
266 i915_gem_object_put(obj);
267
268 huc_fw->fetch_status = INTEL_UC_FIRMWARE_NONE;
269}
270
271/**
272 * intel_guc_auth_huc() - authenticate ucode 254 * intel_guc_auth_huc() - authenticate ucode
273 * @dev_priv: the drm_i915_device 255 * @dev_priv: the drm_i915_device
274 * 256 *
diff --git a/drivers/gpu/drm/i915/intel_lpe_audio.c b/drivers/gpu/drm/i915/intel_lpe_audio.c
index 7a5b41b1c024..25d8e76489e4 100644
--- a/drivers/gpu/drm/i915/intel_lpe_audio.c
+++ b/drivers/gpu/drm/i915/intel_lpe_audio.c
@@ -131,8 +131,15 @@ err:
131 131
132static void lpe_audio_platdev_destroy(struct drm_i915_private *dev_priv) 132static void lpe_audio_platdev_destroy(struct drm_i915_private *dev_priv)
133{ 133{
134 /* XXX Note that platform_device_register_full() allocates a dma_mask
135 * and never frees it. We can't free it here as we cannot guarantee
136 * this is the last reference (i.e. that the dma_mask will not be
137 * used after our unregister). So we choose to leak the sizeof(u64)
138 * allocation here - it should be fixed in the platform_device rather
139 * than us fiddle with its internals.
140 */
141
134 platform_device_unregister(dev_priv->lpe_audio.platdev); 142 platform_device_unregister(dev_priv->lpe_audio.platdev);
135 kfree(dev_priv->lpe_audio.platdev->dev.dma_mask);
136} 143}
137 144
138static void lpe_audio_irq_unmask(struct irq_data *d) 145static void lpe_audio_irq_unmask(struct irq_data *d)
@@ -331,6 +338,7 @@ void intel_lpe_audio_teardown(struct drm_i915_private *dev_priv)
331 * audio driver and i915 338 * audio driver and i915
332 * @dev_priv: the i915 drm device private data 339 * @dev_priv: the i915 drm device private data
333 * @eld : ELD data 340 * @eld : ELD data
341 * @pipe: pipe id
334 * @port: port id 342 * @port: port id
335 * @tmds_clk_speed: tmds clock frequency in Hz 343 * @tmds_clk_speed: tmds clock frequency in Hz
336 * 344 *
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 77168e673e0a..c8f7c631fc1f 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -326,7 +326,7 @@ static u64 execlists_update_context(struct drm_i915_gem_request *rq)
326 rq->ctx->ppgtt ?: rq->i915->mm.aliasing_ppgtt; 326 rq->ctx->ppgtt ?: rq->i915->mm.aliasing_ppgtt;
327 u32 *reg_state = ce->lrc_reg_state; 327 u32 *reg_state = ce->lrc_reg_state;
328 328
329 GEM_BUG_ON(!IS_ALIGNED(rq->tail, 8)); 329 assert_ring_tail_valid(rq->ring, rq->tail);
330 reg_state[CTX_RING_TAIL+1] = rq->tail; 330 reg_state[CTX_RING_TAIL+1] = rq->tail;
331 331
332 /* True 32b PPGTT with dynamic page allocation: update PDP 332 /* True 32b PPGTT with dynamic page allocation: update PDP
@@ -399,22 +399,9 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
399{ 399{
400 struct drm_i915_gem_request *last; 400 struct drm_i915_gem_request *last;
401 struct execlist_port *port = engine->execlist_port; 401 struct execlist_port *port = engine->execlist_port;
402 unsigned long flags;
403 struct rb_node *rb; 402 struct rb_node *rb;
404 bool submit = false; 403 bool submit = false;
405 404
406 /* After execlist_first is updated, the tasklet will be rescheduled.
407 *
408 * If we are currently running (inside the tasklet) and a third
409 * party queues a request and so updates engine->execlist_first under
410 * the spinlock (which we have elided), it will atomically set the
411 * TASKLET_SCHED flag causing us to be re-executed and pick up
412 * the change in state (the update to TASKLET_SCHED incurs a memory
413 * barrier making this cross-cpu checking safe).
414 */
415 if (!READ_ONCE(engine->execlist_first))
416 return;
417
418 last = port->request; 405 last = port->request;
419 if (last) 406 if (last)
420 /* WaIdleLiteRestore:bdw,skl 407 /* WaIdleLiteRestore:bdw,skl
@@ -448,7 +435,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
448 * and context switches) submission. 435 * and context switches) submission.
449 */ 436 */
450 437
451 spin_lock_irqsave(&engine->timeline->lock, flags); 438 spin_lock_irq(&engine->timeline->lock);
452 rb = engine->execlist_first; 439 rb = engine->execlist_first;
453 while (rb) { 440 while (rb) {
454 struct drm_i915_gem_request *cursor = 441 struct drm_i915_gem_request *cursor =
@@ -500,7 +487,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
500 i915_gem_request_assign(&port->request, last); 487 i915_gem_request_assign(&port->request, last);
501 engine->execlist_first = rb; 488 engine->execlist_first = rb;
502 } 489 }
503 spin_unlock_irqrestore(&engine->timeline->lock, flags); 490 spin_unlock_irq(&engine->timeline->lock);
504 491
505 if (submit) 492 if (submit)
506 execlists_submit_ports(engine); 493 execlists_submit_ports(engine);
@@ -530,24 +517,36 @@ static void intel_lrc_irq_handler(unsigned long data)
530 517
531 intel_uncore_forcewake_get(dev_priv, engine->fw_domains); 518 intel_uncore_forcewake_get(dev_priv, engine->fw_domains);
532 519
533 while (test_and_clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted)) { 520 /* Prefer doing test_and_clear_bit() as a two stage operation to avoid
521 * imposing the cost of a locked atomic transaction when submitting a
522 * new request (outside of the context-switch interrupt).
523 */
524 while (test_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted)) {
534 u32 __iomem *csb_mmio = 525 u32 __iomem *csb_mmio =
535 dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine)); 526 dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine));
536 u32 __iomem *buf = 527 u32 __iomem *buf =
537 dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_BUF_LO(engine, 0)); 528 dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_BUF_LO(engine, 0));
538 unsigned int csb, head, tail; 529 unsigned int head, tail;
539 530
540 csb = readl(csb_mmio); 531 /* The write will be ordered by the uncached read (itself
541 head = GEN8_CSB_READ_PTR(csb); 532 * a memory barrier), so we do not need another in the form
542 tail = GEN8_CSB_WRITE_PTR(csb); 533 * of a locked instruction. The race between the interrupt
543 if (head == tail) 534 * handler and the split test/clear is harmless as we order
544 break; 535 * our clear before the CSB read. If the interrupt arrived
536 * first between the test and the clear, we read the updated
537 * CSB and clear the bit. If the interrupt arrives as we read
538 * the CSB or later (i.e. after we had cleared the bit) the bit
539 * is set and we do a new loop.
540 */
541 __clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
542 head = readl(csb_mmio);
543 tail = GEN8_CSB_WRITE_PTR(head);
544 head = GEN8_CSB_READ_PTR(head);
545 while (head != tail) {
546 unsigned int status;
545 547
546 if (tail < head) 548 if (++head == GEN8_CSB_ENTRIES)
547 tail += GEN8_CSB_ENTRIES; 549 head = 0;
548 do {
549 unsigned int idx = ++head % GEN8_CSB_ENTRIES;
550 unsigned int status = readl(buf + 2 * idx);
551 550
552 /* We are flying near dragons again. 551 /* We are flying near dragons again.
553 * 552 *
@@ -566,11 +565,12 @@ static void intel_lrc_irq_handler(unsigned long data)
566 * status notifier. 565 * status notifier.
567 */ 566 */
568 567
568 status = readl(buf + 2 * head);
569 if (!(status & GEN8_CTX_STATUS_COMPLETED_MASK)) 569 if (!(status & GEN8_CTX_STATUS_COMPLETED_MASK))
570 continue; 570 continue;
571 571
572 /* Check the context/desc id for this event matches */ 572 /* Check the context/desc id for this event matches */
573 GEM_DEBUG_BUG_ON(readl(buf + 2 * idx + 1) != 573 GEM_DEBUG_BUG_ON(readl(buf + 2 * head + 1) !=
574 port[0].context_id); 574 port[0].context_id);
575 575
576 GEM_BUG_ON(port[0].count == 0); 576 GEM_BUG_ON(port[0].count == 0);
@@ -588,10 +588,9 @@ static void intel_lrc_irq_handler(unsigned long data)
588 588
589 GEM_BUG_ON(port[0].count == 0 && 589 GEM_BUG_ON(port[0].count == 0 &&
590 !(status & GEN8_CTX_STATUS_ACTIVE_IDLE)); 590 !(status & GEN8_CTX_STATUS_ACTIVE_IDLE));
591 } while (head < tail); 591 }
592 592
593 writel(_MASKED_FIELD(GEN8_CSB_READ_PTR_MASK, 593 writel(_MASKED_FIELD(GEN8_CSB_READ_PTR_MASK, head << 8),
594 GEN8_CSB_WRITE_PTR(csb) << 8),
595 csb_mmio); 594 csb_mmio);
596 } 595 }
597 596
@@ -647,15 +646,14 @@ static void execlists_submit_request(struct drm_i915_gem_request *request)
647static struct intel_engine_cs * 646static struct intel_engine_cs *
648pt_lock_engine(struct i915_priotree *pt, struct intel_engine_cs *locked) 647pt_lock_engine(struct i915_priotree *pt, struct intel_engine_cs *locked)
649{ 648{
650 struct intel_engine_cs *engine; 649 struct intel_engine_cs *engine =
650 container_of(pt, struct drm_i915_gem_request, priotree)->engine;
651
652 GEM_BUG_ON(!locked);
651 653
652 engine = container_of(pt,
653 struct drm_i915_gem_request,
654 priotree)->engine;
655 if (engine != locked) { 654 if (engine != locked) {
656 if (locked) 655 spin_unlock(&locked->timeline->lock);
657 spin_unlock_irq(&locked->timeline->lock); 656 spin_lock(&engine->timeline->lock);
658 spin_lock_irq(&engine->timeline->lock);
659 } 657 }
660 658
661 return engine; 659 return engine;
@@ -663,7 +661,7 @@ pt_lock_engine(struct i915_priotree *pt, struct intel_engine_cs *locked)
663 661
664static void execlists_schedule(struct drm_i915_gem_request *request, int prio) 662static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
665{ 663{
666 struct intel_engine_cs *engine = NULL; 664 struct intel_engine_cs *engine;
667 struct i915_dependency *dep, *p; 665 struct i915_dependency *dep, *p;
668 struct i915_dependency stack; 666 struct i915_dependency stack;
669 LIST_HEAD(dfs); 667 LIST_HEAD(dfs);
@@ -697,26 +695,23 @@ static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
697 list_for_each_entry_safe(dep, p, &dfs, dfs_link) { 695 list_for_each_entry_safe(dep, p, &dfs, dfs_link) {
698 struct i915_priotree *pt = dep->signaler; 696 struct i915_priotree *pt = dep->signaler;
699 697
700 list_for_each_entry(p, &pt->signalers_list, signal_link) 698 /* Within an engine, there can be no cycle, but we may
699 * refer to the same dependency chain multiple times
700 * (redundant dependencies are not eliminated) and across
701 * engines.
702 */
703 list_for_each_entry(p, &pt->signalers_list, signal_link) {
704 GEM_BUG_ON(p->signaler->priority < pt->priority);
701 if (prio > READ_ONCE(p->signaler->priority)) 705 if (prio > READ_ONCE(p->signaler->priority))
702 list_move_tail(&p->dfs_link, &dfs); 706 list_move_tail(&p->dfs_link, &dfs);
707 }
703 708
704 list_safe_reset_next(dep, p, dfs_link); 709 list_safe_reset_next(dep, p, dfs_link);
705 if (!RB_EMPTY_NODE(&pt->node))
706 continue;
707
708 engine = pt_lock_engine(pt, engine);
709
710 /* If it is not already in the rbtree, we can update the
711 * priority inplace and skip over it (and its dependencies)
712 * if it is referenced *again* as we descend the dfs.
713 */
714 if (prio > pt->priority && RB_EMPTY_NODE(&pt->node)) {
715 pt->priority = prio;
716 list_del_init(&dep->dfs_link);
717 }
718 } 710 }
719 711
712 engine = request->engine;
713 spin_lock_irq(&engine->timeline->lock);
714
720 /* Fifo and depth-first replacement ensure our deps execute before us */ 715 /* Fifo and depth-first replacement ensure our deps execute before us */
721 list_for_each_entry_safe_reverse(dep, p, &dfs, dfs_link) { 716 list_for_each_entry_safe_reverse(dep, p, &dfs, dfs_link) {
722 struct i915_priotree *pt = dep->signaler; 717 struct i915_priotree *pt = dep->signaler;
@@ -728,16 +723,15 @@ static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
728 if (prio <= pt->priority) 723 if (prio <= pt->priority)
729 continue; 724 continue;
730 725
731 GEM_BUG_ON(RB_EMPTY_NODE(&pt->node));
732
733 pt->priority = prio; 726 pt->priority = prio;
734 rb_erase(&pt->node, &engine->execlist_queue); 727 if (!RB_EMPTY_NODE(&pt->node)) {
735 if (insert_request(pt, &engine->execlist_queue)) 728 rb_erase(&pt->node, &engine->execlist_queue);
736 engine->execlist_first = &pt->node; 729 if (insert_request(pt, &engine->execlist_queue))
730 engine->execlist_first = &pt->node;
731 }
737 } 732 }
738 733
739 if (engine) 734 spin_unlock_irq(&engine->timeline->lock);
740 spin_unlock_irq(&engine->timeline->lock);
741 735
742 /* XXX Do we need to preempt to make room for us and our deps? */ 736 /* XXX Do we need to preempt to make room for us and our deps? */
743} 737}
@@ -1255,7 +1249,6 @@ static void reset_common_ring(struct intel_engine_cs *engine,
1255 ce->lrc_reg_state[CTX_RING_HEAD+1] = request->postfix; 1249 ce->lrc_reg_state[CTX_RING_HEAD+1] = request->postfix;
1256 1250
1257 request->ring->head = request->postfix; 1251 request->ring->head = request->postfix;
1258 request->ring->last_retired_head = -1;
1259 intel_ring_update_space(request->ring); 1252 intel_ring_update_space(request->ring);
1260 1253
1261 /* Catch up with any missed context-switch interrupts */ 1254 /* Catch up with any missed context-switch interrupts */
@@ -1268,8 +1261,10 @@ static void reset_common_ring(struct intel_engine_cs *engine,
1268 GEM_BUG_ON(request->ctx != port[0].request->ctx); 1261 GEM_BUG_ON(request->ctx != port[0].request->ctx);
1269 1262
1270 /* Reset WaIdleLiteRestore:bdw,skl as well */ 1263 /* Reset WaIdleLiteRestore:bdw,skl as well */
1271 request->tail = request->wa_tail - WA_TAIL_DWORDS * sizeof(u32); 1264 request->tail =
1272 GEM_BUG_ON(!IS_ALIGNED(request->tail, 8)); 1265 intel_ring_wrap(request->ring,
1266 request->wa_tail - WA_TAIL_DWORDS*sizeof(u32));
1267 assert_ring_tail_valid(request->ring, request->tail);
1273} 1268}
1274 1269
1275static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req) 1270static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
@@ -1480,7 +1475,7 @@ static void gen8_emit_breadcrumb(struct drm_i915_gem_request *request, u32 *cs)
1480 *cs++ = MI_USER_INTERRUPT; 1475 *cs++ = MI_USER_INTERRUPT;
1481 *cs++ = MI_NOOP; 1476 *cs++ = MI_NOOP;
1482 request->tail = intel_ring_offset(request, cs); 1477 request->tail = intel_ring_offset(request, cs);
1483 GEM_BUG_ON(!IS_ALIGNED(request->tail, 8)); 1478 assert_ring_tail_valid(request->ring, request->tail);
1484 1479
1485 gen8_emit_wa_tail(request, cs); 1480 gen8_emit_wa_tail(request, cs);
1486} 1481}
@@ -1508,7 +1503,7 @@ static void gen8_emit_breadcrumb_render(struct drm_i915_gem_request *request,
1508 *cs++ = MI_USER_INTERRUPT; 1503 *cs++ = MI_USER_INTERRUPT;
1509 *cs++ = MI_NOOP; 1504 *cs++ = MI_NOOP;
1510 request->tail = intel_ring_offset(request, cs); 1505 request->tail = intel_ring_offset(request, cs);
1511 GEM_BUG_ON(!IS_ALIGNED(request->tail, 8)); 1506 assert_ring_tail_valid(request->ring, request->tail);
1512 1507
1513 gen8_emit_wa_tail(request, cs); 1508 gen8_emit_wa_tail(request, cs);
1514} 1509}
@@ -1575,6 +1570,7 @@ static void execlists_set_default_submission(struct intel_engine_cs *engine)
1575{ 1570{
1576 engine->submit_request = execlists_submit_request; 1571 engine->submit_request = execlists_submit_request;
1577 engine->schedule = execlists_schedule; 1572 engine->schedule = execlists_schedule;
1573 engine->irq_tasklet.func = intel_lrc_irq_handler;
1578} 1574}
1579 1575
1580static void 1576static void
@@ -2041,7 +2037,6 @@ void intel_lr_context_resume(struct drm_i915_private *dev_priv)
2041 i915_gem_object_unpin_map(ce->state->obj); 2037 i915_gem_object_unpin_map(ce->state->obj);
2042 2038
2043 ce->ring->head = ce->ring->tail = 0; 2039 ce->ring->head = ce->ring->tail = 0;
2044 ce->ring->last_retired_head = -1;
2045 intel_ring_update_space(ce->ring); 2040 intel_ring_update_space(ce->ring);
2046 } 2041 }
2047 } 2042 }
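
The tasklet rewrite above splits test_and_clear_bit() into test_bit() plus an unlocked __clear_bit(), letting the uncached CSB pointer read provide the ordering instead of a locked instruction. A sketch of that loop shape; IRQ_EXECLIST, CSB_ENTRIES, struct my_engine and the csb_* helpers are stand-ins for the register decoding, not real API:

	#include <linux/bitops.h>	/* test_bit(), __clear_bit() */
	#include <linux/io.h>		/* readl(), writel() */

	static void drain_csb(struct my_engine *engine)
	{
		unsigned int head, tail;
		u32 ptrs;

		while (test_bit(IRQ_EXECLIST, &engine->irq_posted)) {
			/* Unlocked clear, ordered before the CSB read. */
			__clear_bit(IRQ_EXECLIST, &engine->irq_posted);

			/* The uncached mmio read is the ordering point: an
			 * interrupt arriving before it is covered by this
			 * pass; one arriving after re-sets the bit and the
			 * outer loop runs once more. */
			ptrs = readl(engine->csb_ptr_reg);
			head = csb_read_ptr(ptrs);
			tail = csb_write_ptr(ptrs);

			while (head != tail) {
				if (++head == CSB_ENTRIES)
					head = 0;
				process_csb_entry(readl(engine->csb_buf +
							2 * head));
			}

			/* Report how far we consumed. */
			writel(csb_make_read_ptr(head), engine->csb_ptr_reg);
		}
	}
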
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 441c01466384..d44465190dc1 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -920,6 +920,8 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv)
920 char buf[sizeof(OPREGION_SIGNATURE)]; 920 char buf[sizeof(OPREGION_SIGNATURE)];
921 int err = 0; 921 int err = 0;
922 void *base; 922 void *base;
923 const void *vbt;
924 u32 vbt_size;
923 925
924 BUILD_BUG_ON(sizeof(struct opregion_header) != 0x100); 926 BUILD_BUG_ON(sizeof(struct opregion_header) != 0x100);
925 BUILD_BUG_ON(sizeof(struct opregion_acpi) != 0x100); 927 BUILD_BUG_ON(sizeof(struct opregion_acpi) != 0x100);
@@ -972,45 +974,46 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv)
972 if (mboxes & MBOX_ASLE_EXT) 974 if (mboxes & MBOX_ASLE_EXT)
973 DRM_DEBUG_DRIVER("ASLE extension supported\n"); 975 DRM_DEBUG_DRIVER("ASLE extension supported\n");
974 976
975 if (!dmi_check_system(intel_no_opregion_vbt)) { 977 if (dmi_check_system(intel_no_opregion_vbt))
976 const void *vbt = NULL; 978 goto out;
977 u32 vbt_size = 0;
978
979 if (opregion->header->opregion_ver >= 2 && opregion->asle &&
980 opregion->asle->rvda && opregion->asle->rvds) {
981 opregion->rvda = memremap(opregion->asle->rvda,
982 opregion->asle->rvds,
983 MEMREMAP_WB);
984 vbt = opregion->rvda;
985 vbt_size = opregion->asle->rvds;
986 }
987 979
980 if (opregion->header->opregion_ver >= 2 && opregion->asle &&
981 opregion->asle->rvda && opregion->asle->rvds) {
982 opregion->rvda = memremap(opregion->asle->rvda,
983 opregion->asle->rvds,
984 MEMREMAP_WB);
985 vbt = opregion->rvda;
986 vbt_size = opregion->asle->rvds;
988 if (intel_bios_is_valid_vbt(vbt, vbt_size)) { 987 if (intel_bios_is_valid_vbt(vbt, vbt_size)) {
989 DRM_DEBUG_KMS("Found valid VBT in ACPI OpRegion (RVDA)\n"); 988 DRM_DEBUG_KMS("Found valid VBT in ACPI OpRegion (RVDA)\n");
990 opregion->vbt = vbt; 989 opregion->vbt = vbt;
991 opregion->vbt_size = vbt_size; 990 opregion->vbt_size = vbt_size;
991 goto out;
992 } else { 992 } else {
993 vbt = base + OPREGION_VBT_OFFSET; 993 DRM_DEBUG_KMS("Invalid VBT in ACPI OpRegion (RVDA)\n");
994 /*
995 * The VBT specification says that if the ASLE ext
996 * mailbox is not used its area is reserved, but
997 * on some CHT boards the VBT extends into the
998 * ASLE ext area. Allow this even though it is
999 * against the spec, so we do not end up rejecting
1000 * the VBT on those boards (and end up not finding the
1001 * LCD panel because of this).
1002 */
1003 vbt_size = (mboxes & MBOX_ASLE_EXT) ?
1004 OPREGION_ASLE_EXT_OFFSET : OPREGION_SIZE;
1005 vbt_size -= OPREGION_VBT_OFFSET;
1006 if (intel_bios_is_valid_vbt(vbt, vbt_size)) {
1007 DRM_DEBUG_KMS("Found valid VBT in ACPI OpRegion (Mailbox #4)\n");
1008 opregion->vbt = vbt;
1009 opregion->vbt_size = vbt_size;
1010 }
1011 } 994 }
1012 } 995 }
1013 996
997 vbt = base + OPREGION_VBT_OFFSET;
998 /*
999 * The VBT specification says that if the ASLE ext mailbox is not used
1000 * its area is reserved, but on some CHT boards the VBT extends into the
1001 * ASLE ext area. Allow this even though it is against the spec, so we
1002 * do not end up rejecting the VBT on those boards (and end up not
1003 * finding the LCD panel because of this).
1004 */
1005 vbt_size = (mboxes & MBOX_ASLE_EXT) ?
1006 OPREGION_ASLE_EXT_OFFSET : OPREGION_SIZE;
1007 vbt_size -= OPREGION_VBT_OFFSET;
1008 if (intel_bios_is_valid_vbt(vbt, vbt_size)) {
1009 DRM_DEBUG_KMS("Found valid VBT in ACPI OpRegion (Mailbox #4)\n");
1010 opregion->vbt = vbt;
1011 opregion->vbt_size = vbt_size;
1012 } else {
1013 DRM_DEBUG_KMS("Invalid VBT in ACPI OpRegion (Mailbox #4)\n");
1014 }
1015
1016out:
1014 return 0; 1017 return 0;
1015 1018
1016err_out: 1019err_out:
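
The opregion rewrite above straightens the nested VBT lookup into a linear order: try the RVDA pointer advertised in ASLE first (OpRegion 2.0+), and fall back to mailbox #4 only when RVDA is absent or invalid. A sketch of that control flow; every struct field and helper below is illustrative, only the ordering mirrors the code:

	#include <linux/io.h>	/* memremap(), MEMREMAP_WB */

	static void opregion_find_vbt(struct my_opregion *op, u32 mboxes)
	{
		const void *vbt;
		u32 vbt_size;

		/* 1. Preferred source: RVDA/RVDS from the ASLE mailbox. */
		if (op->version >= 2 && op->rvda && op->rvds) {
			vbt = memremap(op->rvda, op->rvds, MEMREMAP_WB);
			if (vbt && vbt_is_valid(vbt, op->rvds)) {
				op->vbt = vbt;
				op->vbt_size = op->rvds;
				return;
			}
		}

		/* 2. Fallback: mailbox #4. If the ASLE ext mailbox is
		 * unused, allow the VBT to spill into its area, matching
		 * the out-of-spec CHT boards tolerated above. */
		vbt = op->base + VBT_OFFSET;
		vbt_size = ((mboxes & MBOX_ASLE_EXT) ?
			    ASLE_EXT_OFFSET : OPREGION_SIZE) - VBT_OFFSET;
		if (vbt_is_valid(vbt, vbt_size)) {
			op->vbt = vbt;
			op->vbt_size = vbt_size;
		}
	}
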
diff --git a/drivers/gpu/drm/i915/intel_pipe_crc.c b/drivers/gpu/drm/i915/intel_pipe_crc.c
index 9fd9c70baeed..206ee4f0150e 100644
--- a/drivers/gpu/drm/i915/intel_pipe_crc.c
+++ b/drivers/gpu/drm/i915/intel_pipe_crc.c
@@ -522,7 +522,7 @@ static void hsw_trans_edp_pipe_A_crc_wa(struct drm_i915_private *dev_priv,
522 goto unlock; 522 goto unlock;
523 } 523 }
524 524
525 state->acquire_ctx = drm_modeset_legacy_acquire_ctx(&crtc->base); 525 state->acquire_ctx = crtc->base.dev->mode_config.acquire_ctx;
526 pipe_config = intel_atomic_get_crtc_state(state, crtc); 526 pipe_config = intel_atomic_get_crtc_state(state, crtc);
527 if (IS_ERR(pipe_config)) { 527 if (IS_ERR(pipe_config)) {
528 ret = PTR_ERR(pipe_config); 528 ret = PTR_ERR(pipe_config);
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index aece0ff88a5d..570bd603f401 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -655,6 +655,29 @@ static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
655 return wm_size; 655 return wm_size;
656} 656}
657 657
658static bool intel_wm_plane_visible(const struct intel_crtc_state *crtc_state,
659 const struct intel_plane_state *plane_state)
660{
661 struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
662
663 /* FIXME check the 'enable' instead */
664 if (!crtc_state->base.active)
665 return false;
666
667 /*
668 * Treat cursor with fb as always visible since cursor updates
669 * can happen faster than the vrefresh rate, and the current
670 * watermark code doesn't handle that correctly. Cursor updates
671 * which set/clear the fb or change the cursor size are going
672 * to get throttled by intel_legacy_cursor_update() to work
673 * around this problem with the watermark code.
674 */
675 if (plane->id == PLANE_CURSOR)
676 return plane_state->base.fb != NULL;
677 else
678 return plane_state->base.visible;
679}
680
658static struct intel_crtc *single_enabled_crtc(struct drm_i915_private *dev_priv) 681static struct intel_crtc *single_enabled_crtc(struct drm_i915_private *dev_priv)
659{ 682{
660 struct intel_crtc *crtc, *enabled = NULL; 683 struct intel_crtc *crtc, *enabled = NULL;
@@ -1961,7 +1984,7 @@ static uint32_t ilk_compute_pri_wm(const struct intel_crtc_state *cstate,
1961 uint32_t method1, method2; 1984 uint32_t method1, method2;
1962 int cpp; 1985 int cpp;
1963 1986
1964 if (!cstate->base.active || !pstate->base.visible) 1987 if (!intel_wm_plane_visible(cstate, pstate))
1965 return 0; 1988 return 0;
1966 1989
1967 cpp = pstate->base.fb->format->cpp[0]; 1990 cpp = pstate->base.fb->format->cpp[0];
@@ -1990,7 +2013,7 @@ static uint32_t ilk_compute_spr_wm(const struct intel_crtc_state *cstate,
1990 uint32_t method1, method2; 2013 uint32_t method1, method2;
1991 int cpp; 2014 int cpp;
1992 2015
1993 if (!cstate->base.active || !pstate->base.visible) 2016 if (!intel_wm_plane_visible(cstate, pstate))
1994 return 0; 2017 return 0;
1995 2018
1996 cpp = pstate->base.fb->format->cpp[0]; 2019 cpp = pstate->base.fb->format->cpp[0];
@@ -2013,15 +2036,7 @@ static uint32_t ilk_compute_cur_wm(const struct intel_crtc_state *cstate,
2013{ 2036{
2014 int cpp; 2037 int cpp;
2015 2038
2016 /* 2039 if (!intel_wm_plane_visible(cstate, pstate))
2017 * Treat cursor with fb as always visible since cursor updates
2018 * can happen faster than the vrefresh rate, and the current
2019 * watermark code doesn't handle that correctly. Cursor updates
2020 * which set/clear the fb or change the cursor size are going
2021 * to get throttled by intel_legacy_cursor_update() to work
2022 * around this problem with the watermark code.
2023 */
2024 if (!cstate->base.active || !pstate->base.fb)
2025 return 0; 2040 return 0;
2026 2041
2027 cpp = pstate->base.fb->format->cpp[0]; 2042 cpp = pstate->base.fb->format->cpp[0];
@@ -2038,7 +2053,7 @@ static uint32_t ilk_compute_fbc_wm(const struct intel_crtc_state *cstate,
2038{ 2053{
2039 int cpp; 2054 int cpp;
2040 2055
2041 if (!cstate->base.active || !pstate->base.visible) 2056 if (!intel_wm_plane_visible(cstate, pstate))
2042 return 0; 2057 return 0;
2043 2058
2044 cpp = pstate->base.fb->format->cpp[0]; 2059 cpp = pstate->base.fb->format->cpp[0];
@@ -3346,19 +3361,29 @@ void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
3346 * Caller should take care of dividing & rounding off the value. 3361 * Caller should take care of dividing & rounding off the value.
3347 */ 3362 */
3348static uint32_t 3363static uint32_t
3349skl_plane_downscale_amount(const struct intel_plane_state *pstate) 3364skl_plane_downscale_amount(const struct intel_crtc_state *cstate,
3365 const struct intel_plane_state *pstate)
3350{ 3366{
3367 struct intel_plane *plane = to_intel_plane(pstate->base.plane);
3351 uint32_t downscale_h, downscale_w; 3368 uint32_t downscale_h, downscale_w;
3352 uint32_t src_w, src_h, dst_w, dst_h; 3369 uint32_t src_w, src_h, dst_w, dst_h;
3353 3370
3354 if (WARN_ON(!pstate->base.visible)) 3371 if (WARN_ON(!intel_wm_plane_visible(cstate, pstate)))
3355 return DRM_PLANE_HELPER_NO_SCALING; 3372 return DRM_PLANE_HELPER_NO_SCALING;
3356 3373
3357 /* n.b., src is 16.16 fixed point, dst is whole integer */ 3374 /* n.b., src is 16.16 fixed point, dst is whole integer */
3358 src_w = drm_rect_width(&pstate->base.src); 3375 if (plane->id == PLANE_CURSOR) {
3359 src_h = drm_rect_height(&pstate->base.src); 3376 src_w = pstate->base.src_w;
3360 dst_w = drm_rect_width(&pstate->base.dst); 3377 src_h = pstate->base.src_h;
3361 dst_h = drm_rect_height(&pstate->base.dst); 3378 dst_w = pstate->base.crtc_w;
3379 dst_h = pstate->base.crtc_h;
3380 } else {
3381 src_w = drm_rect_width(&pstate->base.src);
3382 src_h = drm_rect_height(&pstate->base.src);
3383 dst_w = drm_rect_width(&pstate->base.dst);
3384 dst_h = drm_rect_height(&pstate->base.dst);
3385 }
3386
3362 if (drm_rotation_90_or_270(pstate->base.rotation)) 3387 if (drm_rotation_90_or_270(pstate->base.rotation))
3363 swap(dst_w, dst_h); 3388 swap(dst_w, dst_h);
3364 3389
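The downscale amount computed by this helper is a 16.16 fixed-point ratio, which is why every caller applies it with a multiply followed by a 16-bit right shift. A self-contained sketch of the arithmetic, simplified to one dimension (the real helper multiplies the width and height ratios together); the clamp at 1.0 mirrors DRM_PLANE_HELPER_NO_SCALING, which is 1 << 16:

	#include <stdint.h>

	#define FP16_ONE (1u << 16)	/* 1.0 in 16.16 fixed point */

	/* assumes dst != 0 and sizes small enough to fit the ratio */
	static uint32_t fp16_downscale(uint32_t src, uint32_t dst)
	{
		uint32_t ratio = (uint32_t)(((uint64_t)src << 16) / dst);

		return ratio > FP16_ONE ? ratio : FP16_ONE; /* clamp at 1.0 */
	}

	/*
	 * Example: fp16_downscale(3840, 1920) == 0x20000, i.e. 2.0, so
	 * adjusted = (uint64_t)rate * factor >> 16 doubles the data rate.
	 */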
@@ -3374,6 +3399,7 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
3374 const struct drm_plane_state *pstate, 3399 const struct drm_plane_state *pstate,
3375 int y) 3400 int y)
3376{ 3401{
3402 struct intel_plane *plane = to_intel_plane(pstate->plane);
3377 struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate); 3403 struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate);
3378 uint32_t down_scale_amount, data_rate; 3404 uint32_t down_scale_amount, data_rate;
3379 uint32_t width = 0, height = 0; 3405 uint32_t width = 0, height = 0;
@@ -3386,7 +3412,7 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
3386 fb = pstate->fb; 3412 fb = pstate->fb;
3387 format = fb->format->format; 3413 format = fb->format->format;
3388 3414
3389 if (pstate->plane->type == DRM_PLANE_TYPE_CURSOR) 3415 if (plane->id == PLANE_CURSOR)
3390 return 0; 3416 return 0;
3391 if (y && format != DRM_FORMAT_NV12) 3417 if (y && format != DRM_FORMAT_NV12)
3392 return 0; 3418 return 0;
@@ -3410,7 +3436,7 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
3410 data_rate = width * height * fb->format->cpp[0]; 3436 data_rate = width * height * fb->format->cpp[0];
3411 } 3437 }
3412 3438
3413 down_scale_amount = skl_plane_downscale_amount(intel_pstate); 3439 down_scale_amount = skl_plane_downscale_amount(cstate, intel_pstate);
3414 3440
3415 return (uint64_t)data_rate * down_scale_amount >> 16; 3441 return (uint64_t)data_rate * down_scale_amount >> 16;
3416} 3442}
@@ -3702,7 +3728,7 @@ static uint32_t skl_adjusted_plane_pixel_rate(const struct intel_crtc_state *cst
3702 uint64_t pixel_rate; 3728 uint64_t pixel_rate;
3703 3729
3704 /* Shouldn't reach here on disabled planes... */ 3730 /* Shouldn't reach here on disabled planes... */
3705 if (WARN_ON(!pstate->base.visible)) 3731 if (WARN_ON(!intel_wm_plane_visible(cstate, pstate)))
3706 return 0; 3732 return 0;
3707 3733
3708 /* 3734 /*
@@ -3710,7 +3736,7 @@ static uint32_t skl_adjusted_plane_pixel_rate(const struct intel_crtc_state *cst
3710 * with additional adjustments for plane-specific scaling. 3736 * with additional adjustments for plane-specific scaling.
3711 */ 3737 */
3712 adjusted_pixel_rate = cstate->pixel_rate; 3738 adjusted_pixel_rate = cstate->pixel_rate;
3713 downscale_amount = skl_plane_downscale_amount(pstate); 3739 downscale_amount = skl_plane_downscale_amount(cstate, pstate);
3714 3740
3715 pixel_rate = adjusted_pixel_rate * downscale_amount >> 16; 3741 pixel_rate = adjusted_pixel_rate * downscale_amount >> 16;
3716 WARN_ON(pixel_rate != clamp_t(uint32_t, pixel_rate, 0, ~0)); 3742 WARN_ON(pixel_rate != clamp_t(uint32_t, pixel_rate, 0, ~0));
@@ -3727,6 +3753,7 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
3727 uint8_t *out_lines, /* out */ 3753 uint8_t *out_lines, /* out */
3728 bool *enabled /* out */) 3754 bool *enabled /* out */)
3729{ 3755{
3756 struct intel_plane *plane = to_intel_plane(intel_pstate->base.plane);
3730 struct drm_plane_state *pstate = &intel_pstate->base; 3757 struct drm_plane_state *pstate = &intel_pstate->base;
3731 struct drm_framebuffer *fb = pstate->fb; 3758 struct drm_framebuffer *fb = pstate->fb;
3732 uint32_t latency = dev_priv->wm.skl_latency[level]; 3759 uint32_t latency = dev_priv->wm.skl_latency[level];
@@ -3746,7 +3773,8 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
3746 bool apply_memory_bw_wa = skl_needs_memory_bw_wa(state); 3773 bool apply_memory_bw_wa = skl_needs_memory_bw_wa(state);
3747 bool y_tiled, x_tiled; 3774 bool y_tiled, x_tiled;
3748 3775
3749 if (latency == 0 || !cstate->base.active || !intel_pstate->base.visible) { 3776 if (latency == 0 ||
3777 !intel_wm_plane_visible(cstate, intel_pstate)) {
3750 *enabled = false; 3778 *enabled = false;
3751 return 0; 3779 return 0;
3752 } 3780 }
@@ -3762,8 +3790,13 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
3762 if (apply_memory_bw_wa && x_tiled) 3790 if (apply_memory_bw_wa && x_tiled)
3763 latency += 15; 3791 latency += 15;
3764 3792
3765 width = drm_rect_width(&intel_pstate->base.src) >> 16; 3793 if (plane->id == PLANE_CURSOR) {
3766 height = drm_rect_height(&intel_pstate->base.src) >> 16; 3794 width = intel_pstate->base.crtc_w;
3795 height = intel_pstate->base.crtc_h;
3796 } else {
3797 width = drm_rect_width(&intel_pstate->base.src) >> 16;
3798 height = drm_rect_height(&intel_pstate->base.src) >> 16;
3799 }
3767 3800
3768 if (drm_rotation_90_or_270(pstate->rotation)) 3801 if (drm_rotation_90_or_270(pstate->rotation))
3769 swap(width, height); 3802 swap(width, height);
@@ -8055,7 +8088,7 @@ static inline int gen6_check_mailbox_status(struct drm_i915_private *dev_priv)
8055 case GEN6_PCODE_TIMEOUT: 8088 case GEN6_PCODE_TIMEOUT:
8056 return -ETIMEDOUT; 8089 return -ETIMEDOUT;
8057 default: 8090 default:
8058 MISSING_CASE(flags) 8091 MISSING_CASE(flags);
8059 return 0; 8092 return 0;
8060 } 8093 }
8061} 8094}
@@ -8355,6 +8388,7 @@ static u64 vlv_residency_raw(struct drm_i915_private *dev_priv,
8355 const i915_reg_t reg) 8388 const i915_reg_t reg)
8356{ 8389{
8357 u32 lower, upper, tmp; 8390 u32 lower, upper, tmp;
8391 int loop = 2;
8358 8392
8359 /* The registers accessed do not need forcewake. We borrow 8393
8360 * uncore lock to prevent concurrent access to range reg. 8394 * uncore lock to prevent concurrent access to range reg.
@@ -8383,7 +8417,7 @@ static u64 vlv_residency_raw(struct drm_i915_private *dev_priv,
8383 I915_WRITE_FW(VLV_COUNTER_CONTROL, 8417 I915_WRITE_FW(VLV_COUNTER_CONTROL,
8384 _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH)); 8418 _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH));
8385 upper = I915_READ_FW(reg); 8419 upper = I915_READ_FW(reg);
8386 } while (upper != tmp); 8420 } while (upper != tmp && --loop);
8387 8421
8388 /* Everywhere else we always use VLV_COUNTER_CONTROL with the 8422 /* Everywhere else we always use VLV_COUNTER_CONTROL with the
8389 * VLV_COUNT_RANGE_HIGH bit set - so it is safe to leave it set 8423 * VLV_COUNT_RANGE_HIGH bit set - so it is safe to leave it set
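The new --loop bound above caps the upper/lower/upper re-read dance at two iterations: the residency counter ticks slowly enough that a second mismatch cannot happen in practice, so the bound only guards against a pathological register. A generic sketch of the tear-free split-counter read, with read_hi()/read_lo() as hypothetical stand-ins for the VLV_COUNTER_CONTROL bank switching done above:

	#include <stdint.h>

	/* hypothetical 32-bit accessors for the two halves of one counter */
	extern uint32_t read_hi(void);
	extern uint32_t read_lo(void);

	static uint64_t read_counter64(void)
	{
		uint32_t lower, upper, tmp;
		int loop = 2;	/* bound the retries */

		upper = read_hi();
		do {
			tmp = upper;
			lower = read_lo();
			upper = read_hi();  /* did a carry race with us? */
		} while (upper != tmp && --loop);

		return ((uint64_t)upper << 32) | lower;
	}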
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index d9b8d17c3fc6..66a2b8b83972 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -49,13 +49,7 @@ static int __intel_ring_space(int head, int tail, int size)
49 49
50void intel_ring_update_space(struct intel_ring *ring) 50void intel_ring_update_space(struct intel_ring *ring)
51{ 51{
52 if (ring->last_retired_head != -1) { 52 ring->space = __intel_ring_space(ring->head, ring->tail, ring->size);
53 ring->head = ring->last_retired_head;
54 ring->last_retired_head = -1;
55 }
56
57 ring->space = __intel_ring_space(ring->head & HEAD_ADDR,
58 ring->tail, ring->size);
59} 53}
60 54
61static int 55static int
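With last_retired_head gone, __intel_ring_space() works purely from the current head and tail. A hedged sketch of the usual circular-buffer accounting such a helper performs, where a few bytes stay permanently reserved so that head == tail is unambiguous ("empty", never "full"):

	/* sketch only: size is a power of two, 'reserved' a small constant */
	static int ring_space(unsigned int head, unsigned int tail,
			      unsigned int size, unsigned int reserved)
	{
		int space = (int)((head - tail) & (size - 1));

		if (space == 0)
			space = size;	/* empty ring: everything is free */

		return space - (int)reserved;
	}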
@@ -618,12 +612,8 @@ static void reset_ring_common(struct intel_engine_cs *engine,
618 } 612 }
619 613
620 /* If the rq hung, jump to its breadcrumb and skip the batch */ 614 /* If the rq hung, jump to its breadcrumb and skip the batch */
621 if (request->fence.error == -EIO) { 615 if (request->fence.error == -EIO)
622 struct intel_ring *ring = request->ring; 616 request->ring->head = request->postfix;
623
624 ring->head = request->postfix;
625 ring->last_retired_head = -1;
626 }
627 } else { 617 } else {
628 engine->legacy_active_context = NULL; 618 engine->legacy_active_context = NULL;
629 } 619 }
@@ -784,7 +774,7 @@ static void i9xx_submit_request(struct drm_i915_gem_request *request)
784 774
785 i915_gem_request_submit(request); 775 i915_gem_request_submit(request);
786 776
787 GEM_BUG_ON(!IS_ALIGNED(request->tail, 8)); 777 assert_ring_tail_valid(request->ring, request->tail);
788 I915_WRITE_TAIL(request->engine, request->tail); 778 I915_WRITE_TAIL(request->engine, request->tail);
789} 779}
790 780
@@ -796,7 +786,7 @@ static void i9xx_emit_breadcrumb(struct drm_i915_gem_request *req, u32 *cs)
796 *cs++ = MI_USER_INTERRUPT; 786 *cs++ = MI_USER_INTERRUPT;
797 787
798 req->tail = intel_ring_offset(req, cs); 788 req->tail = intel_ring_offset(req, cs);
799 GEM_BUG_ON(!IS_ALIGNED(req->tail, 8)); 789 assert_ring_tail_valid(req->ring, req->tail);
800} 790}
801 791
802static const int i9xx_emit_breadcrumb_sz = 4; 792static const int i9xx_emit_breadcrumb_sz = 4;
@@ -835,7 +825,7 @@ static void gen8_render_emit_breadcrumb(struct drm_i915_gem_request *req,
835 *cs++ = MI_NOOP; 825 *cs++ = MI_NOOP;
836 826
837 req->tail = intel_ring_offset(req, cs); 827 req->tail = intel_ring_offset(req, cs);
838 GEM_BUG_ON(!IS_ALIGNED(req->tail, 8)); 828 assert_ring_tail_valid(req->ring, req->tail);
839} 829}
840 830
841static const int gen8_render_emit_breadcrumb_sz = 8; 831static const int gen8_render_emit_breadcrumb_sz = 8;
@@ -1392,7 +1382,6 @@ intel_engine_create_ring(struct intel_engine_cs *engine, int size)
1392 if (IS_I830(engine->i915) || IS_I845G(engine->i915)) 1382 if (IS_I830(engine->i915) || IS_I845G(engine->i915))
1393 ring->effective_size -= 2 * CACHELINE_BYTES; 1383 ring->effective_size -= 2 * CACHELINE_BYTES;
1394 1384
1395 ring->last_retired_head = -1;
1396 intel_ring_update_space(ring); 1385 intel_ring_update_space(ring);
1397 1386
1398 vma = intel_ring_create_vma(engine->i915, size); 1387 vma = intel_ring_create_vma(engine->i915, size);
@@ -1451,6 +1440,8 @@ static int intel_ring_context_pin(struct intel_engine_cs *engine,
1451 ret = context_pin(ctx); 1440 ret = context_pin(ctx);
1452 if (ret) 1441 if (ret)
1453 goto error; 1442 goto error;
1443
1444 ce->state->obj->mm.dirty = true;
1454 } 1445 }
1455 1446
1456 /* The kernel context is only used as a placeholder for flushing the 1447 /* The kernel context is only used as a placeholder for flushing the
@@ -1571,10 +1562,8 @@ void intel_legacy_submission_resume(struct drm_i915_private *dev_priv)
1571 struct intel_engine_cs *engine; 1562 struct intel_engine_cs *engine;
1572 enum intel_engine_id id; 1563 enum intel_engine_id id;
1573 1564
1574 for_each_engine(engine, dev_priv, id) { 1565 for_each_engine(engine, dev_priv, id)
1575 engine->buffer->head = engine->buffer->tail; 1566 engine->buffer->head = engine->buffer->tail;
1576 engine->buffer->last_retired_head = -1;
1577 }
1578} 1567}
1579 1568
1580static int ring_request_alloc(struct drm_i915_gem_request *request) 1569static int ring_request_alloc(struct drm_i915_gem_request *request)
@@ -2128,7 +2117,7 @@ int intel_init_render_ring_buffer(struct intel_engine_cs *engine)
2128 2117
2129 num_rings = 2118 num_rings =
2130 hweight32(INTEL_INFO(dev_priv)->ring_mask) - 1; 2119 hweight32(INTEL_INFO(dev_priv)->ring_mask) - 1;
2131 engine->emit_breadcrumb_sz += num_rings * 6; 2120 engine->emit_breadcrumb_sz += num_rings * 8;
2132 } 2121 }
2133 } else if (INTEL_GEN(dev_priv) >= 6) { 2122 } else if (INTEL_GEN(dev_priv) >= 6) {
2134 engine->init_context = intel_rcs_ctx_init; 2123 engine->init_context = intel_rcs_ctx_init;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 847aea554464..a82a0807f64d 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -149,16 +149,6 @@ struct intel_ring {
149 int space; 149 int space;
150 int size; 150 int size;
151 int effective_size; 151 int effective_size;
152
153 /** We track the position of the requests in the ring buffer, and
154 * when each is retired we increment last_retired_head as the GPU
155 * must have finished processing the request and so we know we
156 * can advance the ringbuffer up to that position.
157 *
158 * last_retired_head is set to -1 after the value is consumed so
159 * we can detect new retirements.
160 */
161 u32 last_retired_head;
162}; 152};
163 153
164struct i915_gem_context; 154struct i915_gem_context;
@@ -442,18 +432,10 @@ struct intel_engine_cs {
442 u32 (*get_cmd_length_mask)(u32 cmd_header); 432 u32 (*get_cmd_length_mask)(u32 cmd_header);
443}; 433};
444 434
445static inline unsigned 435static inline unsigned int
446intel_engine_flag(const struct intel_engine_cs *engine) 436intel_engine_flag(const struct intel_engine_cs *engine)
447{ 437{
448 return 1 << engine->id; 438 return BIT(engine->id);
449}
450
451static inline void
452intel_flush_status_page(struct intel_engine_cs *engine, int reg)
453{
454 mb();
455 clflush(&engine->status_page.page_addr[reg]);
456 mb();
457} 439}
458 440
459static inline u32 441static inline u32
@@ -464,14 +446,22 @@ intel_read_status_page(struct intel_engine_cs *engine, int reg)
464} 446}
465 447
466static inline void 448static inline void
467intel_write_status_page(struct intel_engine_cs *engine, 449intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value)
468 int reg, u32 value)
469{ 450{
470 mb(); 451 /* Writing into the status page should be done sparingly. Since
471 clflush(&engine->status_page.page_addr[reg]); 452 * we do so when we are uncertain of the device state, we take a bit
472 engine->status_page.page_addr[reg] = value; 453 * of extra paranoia to try and ensure that the HWS takes the value
473 clflush(&engine->status_page.page_addr[reg]); 454 * we give and that it doesn't end up trapped inside the CPU!
474 mb(); 455 */
456 if (static_cpu_has(X86_FEATURE_CLFLUSH)) {
457 mb();
458 clflush(&engine->status_page.page_addr[reg]);
459 engine->status_page.page_addr[reg] = value;
460 clflush(&engine->status_page.page_addr[reg]);
461 mb();
462 } else {
463 WRITE_ONCE(engine->status_page.page_addr[reg], value);
464 }
475} 465}
476 466
477/* 467/*
@@ -525,12 +515,29 @@ intel_ring_advance(struct drm_i915_gem_request *req, u32 *cs)
525} 515}
526 516
527static inline u32 517static inline u32
528intel_ring_offset(struct drm_i915_gem_request *req, void *addr) 518intel_ring_wrap(const struct intel_ring *ring, u32 pos)
519{
520 return pos & (ring->size - 1);
521}
522
523static inline u32
524intel_ring_offset(const struct drm_i915_gem_request *req, void *addr)
529{ 525{
530 /* Don't write ring->size (equivalent to 0) as that hangs some GPUs. */ 526 /* Don't write ring->size (equivalent to 0) as that hangs some GPUs. */
531 u32 offset = addr - req->ring->vaddr; 527 u32 offset = addr - req->ring->vaddr;
532 GEM_BUG_ON(offset > req->ring->size); 528 GEM_BUG_ON(offset > req->ring->size);
533 return offset & (req->ring->size - 1); 529 return intel_ring_wrap(req->ring, offset);
530}
531
532static inline void
533assert_ring_tail_valid(const struct intel_ring *ring, unsigned int tail)
534{
535 /* We could combine these into a single tail operation, but keeping
536 * them as separate tests will help identify the cause should one
537 * ever fire.
538 */
539 GEM_BUG_ON(!IS_ALIGNED(tail, 8));
540 GEM_BUG_ON(tail >= ring->size);
534} 541}
535 542
536void intel_ring_update_space(struct intel_ring *ring); 543void intel_ring_update_space(struct intel_ring *ring);
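intel_ring_wrap() and assert_ring_tail_valid() both rely on the ring size being a power of two, so wrapping a position is a single AND rather than a modulo. A tiny standalone illustration:

	#include <assert.h>

	static unsigned int ring_wrap(unsigned int pos, unsigned int size)
	{
		assert((size & (size - 1)) == 0);  /* power of two only */
		return pos & (size - 1);           /* == pos % size, no divide */
	}

	/* e.g. ring_wrap(5000, 4096) == 904; a tail of exactly 'size' wraps
	 * to 0, which is also why intel_ring_offset() masks before writing. */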
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 012bc358a33a..f8a375f8dde6 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -2840,8 +2840,10 @@ void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
2840{ 2840{
2841 struct pci_dev *pdev = dev_priv->drm.pdev; 2841 struct pci_dev *pdev = dev_priv->drm.pdev;
2842 struct device *kdev = &pdev->dev; 2842 struct device *kdev = &pdev->dev;
2843 int ret;
2843 2844
2844 pm_runtime_get_sync(kdev); 2845 ret = pm_runtime_get_sync(kdev);
2846 WARN_ONCE(ret < 0, "pm_runtime_get_sync() failed: %d\n", ret);
2845 2847
2846 atomic_inc(&dev_priv->pm.wakeref_count); 2848 atomic_inc(&dev_priv->pm.wakeref_count);
2847 assert_rpm_wakelock_held(dev_priv); 2849 assert_rpm_wakelock_held(dev_priv);
@@ -2871,7 +2873,8 @@ bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv)
2871 * function, since the power state is undefined. This applies 2873 * function, since the power state is undefined. This applies
2872 * atm to the late/early system suspend/resume handlers. 2874 * atm to the late/early system suspend/resume handlers.
2873 */ 2875 */
2874 WARN_ON_ONCE(ret < 0); 2876 WARN_ONCE(ret < 0,
2877 "pm_runtime_get_if_in_use() failed: %d\n", ret);
2875 if (ret <= 0) 2878 if (ret <= 0)
2876 return false; 2879 return false;
2877 } 2880 }
@@ -2955,8 +2958,11 @@ void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
2955 * platforms without RPM support. 2958 * platforms without RPM support.
2956 */ 2959 */
2957 if (!HAS_RUNTIME_PM(dev_priv)) { 2960 if (!HAS_RUNTIME_PM(dev_priv)) {
2961 int ret;
2962
2958 pm_runtime_dont_use_autosuspend(kdev); 2963 pm_runtime_dont_use_autosuspend(kdev);
2959 pm_runtime_get_sync(kdev); 2964 ret = pm_runtime_get_sync(kdev);
2965 WARN(ret < 0, "pm_runtime_get_sync() failed: %d\n", ret);
2960 } else { 2966 } else {
2961 pm_runtime_use_autosuspend(kdev); 2967 pm_runtime_use_autosuspend(kdev);
2962 } 2968 }
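All three hunks in this file apply the same rule: pm_runtime_get_sync() returns a negative errno on failure, and silently ignoring it hides broken runtime-PM setups. A sketch of the general checked-get pattern; note that this patch only warns, whereas a caller that wanted to unwind would also have to drop the usage count that get_sync takes even on failure:

	#include <linux/pm_runtime.h>

	static int my_get_runtime_pm(struct device *kdev)
	{
		int ret;

		ret = pm_runtime_get_sync(kdev);
		if (ret < 0) {
			/* get_sync bumps the usage count even on failure,
			 * so an unwinding caller must drop it again */
			pm_runtime_put_noidle(kdev);
			return ret;
		}

		return 0;
	}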
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index b931d0bd7a64..f7d431427115 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -217,7 +217,7 @@ skl_update_plane(struct drm_plane *drm_plane,
217 struct drm_framebuffer *fb = plane_state->base.fb; 217 struct drm_framebuffer *fb = plane_state->base.fb;
218 enum plane_id plane_id = intel_plane->id; 218 enum plane_id plane_id = intel_plane->id;
219 enum pipe pipe = intel_plane->pipe; 219 enum pipe pipe = intel_plane->pipe;
220 u32 plane_ctl; 220 u32 plane_ctl = plane_state->ctl;
221 const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; 221 const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
222 u32 surf_addr = plane_state->main.offset; 222 u32 surf_addr = plane_state->main.offset;
223 unsigned int rotation = plane_state->base.rotation; 223 unsigned int rotation = plane_state->base.rotation;
@@ -232,24 +232,6 @@ skl_update_plane(struct drm_plane *drm_plane,
232 uint32_t src_h = drm_rect_height(&plane_state->base.src) >> 16; 232 uint32_t src_h = drm_rect_height(&plane_state->base.src) >> 16;
233 unsigned long irqflags; 233 unsigned long irqflags;
234 234
235 plane_ctl = PLANE_CTL_ENABLE;
236
237 if (!IS_GEMINILAKE(dev_priv)) {
238 plane_ctl |=
239 PLANE_CTL_PIPE_GAMMA_ENABLE |
240 PLANE_CTL_PIPE_CSC_ENABLE |
241 PLANE_CTL_PLANE_GAMMA_DISABLE;
242 }
243
244 plane_ctl |= skl_plane_ctl_format(fb->format->format);
245 plane_ctl |= skl_plane_ctl_tiling(fb->modifier);
246 plane_ctl |= skl_plane_ctl_rotation(rotation);
247
248 if (key->flags & I915_SET_COLORKEY_DESTINATION)
249 plane_ctl |= PLANE_CTL_KEY_ENABLE_DESTINATION;
250 else if (key->flags & I915_SET_COLORKEY_SOURCE)
251 plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE;
252
253 /* Sizes are 0 based */ 235 /* Sizes are 0 based */
254 src_w--; 236 src_w--;
255 src_h--; 237 src_h--;
@@ -361,32 +343,15 @@ chv_update_csc(struct intel_plane *intel_plane, uint32_t format)
361 I915_WRITE_FW(SPCSCCROCLAMP(plane_id), SPCSC_OMAX(1023) | SPCSC_OMIN(0)); 343 I915_WRITE_FW(SPCSCCROCLAMP(plane_id), SPCSC_OMAX(1023) | SPCSC_OMIN(0));
362} 344}
363 345
364static void 346static u32 vlv_sprite_ctl(const struct intel_crtc_state *crtc_state,
365vlv_update_plane(struct drm_plane *dplane, 347 const struct intel_plane_state *plane_state)
366 const struct intel_crtc_state *crtc_state,
367 const struct intel_plane_state *plane_state)
368{ 348{
369 struct drm_device *dev = dplane->dev; 349 const struct drm_framebuffer *fb = plane_state->base.fb;
370 struct drm_i915_private *dev_priv = to_i915(dev);
371 struct intel_plane *intel_plane = to_intel_plane(dplane);
372 struct drm_framebuffer *fb = plane_state->base.fb;
373 enum pipe pipe = intel_plane->pipe;
374 enum plane_id plane_id = intel_plane->id;
375 u32 sprctl;
376 u32 sprsurf_offset, linear_offset;
377 unsigned int rotation = plane_state->base.rotation; 350 unsigned int rotation = plane_state->base.rotation;
378 const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; 351 const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
379 int crtc_x = plane_state->base.dst.x1; 352 u32 sprctl;
380 int crtc_y = plane_state->base.dst.y1;
381 uint32_t crtc_w = drm_rect_width(&plane_state->base.dst);
382 uint32_t crtc_h = drm_rect_height(&plane_state->base.dst);
383 uint32_t x = plane_state->base.src.x1 >> 16;
384 uint32_t y = plane_state->base.src.y1 >> 16;
385 uint32_t src_w = drm_rect_width(&plane_state->base.src) >> 16;
386 uint32_t src_h = drm_rect_height(&plane_state->base.src) >> 16;
387 unsigned long irqflags;
388 353
389 sprctl = SP_ENABLE; 354 sprctl = SP_ENABLE | SP_GAMMA_ENABLE;
390 355
391 switch (fb->format->format) { 356 switch (fb->format->format) {
392 case DRM_FORMAT_YUYV: 357 case DRM_FORMAT_YUYV:
@@ -423,20 +388,10 @@ vlv_update_plane(struct drm_plane *dplane,
423 sprctl |= SP_FORMAT_RGBA8888; 388 sprctl |= SP_FORMAT_RGBA8888;
424 break; 389 break;
425 default: 390 default:
426 /* 391 MISSING_CASE(fb->format->format);
427 * If we get here one of the upper layers failed to filter 392 return 0;
428 * out the unsupported plane formats
429 */
430 BUG();
431 break;
432 } 393 }
433 394
434 /*
435 * Enable gamma to match primary/cursor plane behaviour.
436 * FIXME should be user controllable via propertiesa.
437 */
438 sprctl |= SP_GAMMA_ENABLE;
439
440 if (fb->modifier == I915_FORMAT_MOD_X_TILED) 395 if (fb->modifier == I915_FORMAT_MOD_X_TILED)
441 sprctl |= SP_TILED; 396 sprctl |= SP_TILED;
442 397
@@ -449,22 +404,36 @@ vlv_update_plane(struct drm_plane *dplane,
449 if (key->flags & I915_SET_COLORKEY_SOURCE) 404 if (key->flags & I915_SET_COLORKEY_SOURCE)
450 sprctl |= SP_SOURCE_KEY; 405 sprctl |= SP_SOURCE_KEY;
451 406
407 return sprctl;
408}
409
410static void
411vlv_update_plane(struct drm_plane *dplane,
412 const struct intel_crtc_state *crtc_state,
413 const struct intel_plane_state *plane_state)
414{
415 struct drm_device *dev = dplane->dev;
416 struct drm_i915_private *dev_priv = to_i915(dev);
417 struct intel_plane *intel_plane = to_intel_plane(dplane);
418 struct drm_framebuffer *fb = plane_state->base.fb;
419 enum pipe pipe = intel_plane->pipe;
420 enum plane_id plane_id = intel_plane->id;
421 u32 sprctl = plane_state->ctl;
422 u32 sprsurf_offset = plane_state->main.offset;
423 u32 linear_offset;
424 const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
425 int crtc_x = plane_state->base.dst.x1;
426 int crtc_y = plane_state->base.dst.y1;
427 uint32_t crtc_w = drm_rect_width(&plane_state->base.dst);
428 uint32_t crtc_h = drm_rect_height(&plane_state->base.dst);
429 uint32_t x = plane_state->main.x;
430 uint32_t y = plane_state->main.y;
431 unsigned long irqflags;
432
452 /* Sizes are 0 based */ 433 /* Sizes are 0 based */
453 src_w--;
454 src_h--;
455 crtc_w--; 434 crtc_w--;
456 crtc_h--; 435 crtc_h--;
457 436
458 intel_add_fb_offsets(&x, &y, plane_state, 0);
459 sprsurf_offset = intel_compute_tile_offset(&x, &y, plane_state, 0);
460
461 if (rotation & DRM_ROTATE_180) {
462 x += src_w;
463 y += src_h;
464 } else if (rotation & DRM_REFLECT_X) {
465 x += src_w;
466 }
467
468 linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0); 437 linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
469 438
470 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 439 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
@@ -516,31 +485,23 @@ vlv_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
516 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 485 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
517} 486}
518 487
519static void 488static u32 ivb_sprite_ctl(const struct intel_crtc_state *crtc_state,
520ivb_update_plane(struct drm_plane *plane, 489 const struct intel_plane_state *plane_state)
521 const struct intel_crtc_state *crtc_state,
522 const struct intel_plane_state *plane_state)
523{ 490{
524 struct drm_device *dev = plane->dev; 491 struct drm_i915_private *dev_priv =
525 struct drm_i915_private *dev_priv = to_i915(dev); 492 to_i915(plane_state->base.plane->dev);
526 struct intel_plane *intel_plane = to_intel_plane(plane); 493 const struct drm_framebuffer *fb = plane_state->base.fb;
527 struct drm_framebuffer *fb = plane_state->base.fb;
528 enum pipe pipe = intel_plane->pipe;
529 u32 sprctl, sprscale = 0;
530 u32 sprsurf_offset, linear_offset;
531 unsigned int rotation = plane_state->base.rotation; 494 unsigned int rotation = plane_state->base.rotation;
532 const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; 495 const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
533 int crtc_x = plane_state->base.dst.x1; 496 u32 sprctl;
534 int crtc_y = plane_state->base.dst.y1; 497
535 uint32_t crtc_w = drm_rect_width(&plane_state->base.dst); 498 sprctl = SPRITE_ENABLE | SPRITE_GAMMA_ENABLE;
536 uint32_t crtc_h = drm_rect_height(&plane_state->base.dst); 499
537 uint32_t x = plane_state->base.src.x1 >> 16; 500 if (IS_IVYBRIDGE(dev_priv))
538 uint32_t y = plane_state->base.src.y1 >> 16; 501 sprctl |= SPRITE_TRICKLE_FEED_DISABLE;
539 uint32_t src_w = drm_rect_width(&plane_state->base.src) >> 16;
540 uint32_t src_h = drm_rect_height(&plane_state->base.src) >> 16;
541 unsigned long irqflags;
542 502
543 sprctl = SPRITE_ENABLE; 503 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
504 sprctl |= SPRITE_PIPE_CSC_ENABLE;
544 505
545 switch (fb->format->format) { 506 switch (fb->format->format) {
546 case DRM_FORMAT_XBGR8888: 507 case DRM_FORMAT_XBGR8888:
@@ -562,34 +523,48 @@ ivb_update_plane(struct drm_plane *plane,
562 sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_VYUY; 523 sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_VYUY;
563 break; 524 break;
564 default: 525 default:
565 BUG(); 526 MISSING_CASE(fb->format->format);
527 return 0;
566 } 528 }
567 529
568 /*
569 * Enable gamma to match primary/cursor plane behaviour.
570 * FIXME should be user controllable via propertiesa.
571 */
572 sprctl |= SPRITE_GAMMA_ENABLE;
573
574 if (fb->modifier == I915_FORMAT_MOD_X_TILED) 530 if (fb->modifier == I915_FORMAT_MOD_X_TILED)
575 sprctl |= SPRITE_TILED; 531 sprctl |= SPRITE_TILED;
576 532
577 if (rotation & DRM_ROTATE_180) 533 if (rotation & DRM_ROTATE_180)
578 sprctl |= SPRITE_ROTATE_180; 534 sprctl |= SPRITE_ROTATE_180;
579 535
580 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
581 sprctl &= ~SPRITE_TRICKLE_FEED_DISABLE;
582 else
583 sprctl |= SPRITE_TRICKLE_FEED_DISABLE;
584
585 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
586 sprctl |= SPRITE_PIPE_CSC_ENABLE;
587
588 if (key->flags & I915_SET_COLORKEY_DESTINATION) 536 if (key->flags & I915_SET_COLORKEY_DESTINATION)
589 sprctl |= SPRITE_DEST_KEY; 537 sprctl |= SPRITE_DEST_KEY;
590 else if (key->flags & I915_SET_COLORKEY_SOURCE) 538 else if (key->flags & I915_SET_COLORKEY_SOURCE)
591 sprctl |= SPRITE_SOURCE_KEY; 539 sprctl |= SPRITE_SOURCE_KEY;
592 540
541 return sprctl;
542}
543
544static void
545ivb_update_plane(struct drm_plane *plane,
546 const struct intel_crtc_state *crtc_state,
547 const struct intel_plane_state *plane_state)
548{
549 struct drm_device *dev = plane->dev;
550 struct drm_i915_private *dev_priv = to_i915(dev);
551 struct intel_plane *intel_plane = to_intel_plane(plane);
552 struct drm_framebuffer *fb = plane_state->base.fb;
553 enum pipe pipe = intel_plane->pipe;
554 u32 sprctl = plane_state->ctl, sprscale = 0;
555 u32 sprsurf_offset = plane_state->main.offset;
556 u32 linear_offset;
557 const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
558 int crtc_x = plane_state->base.dst.x1;
559 int crtc_y = plane_state->base.dst.y1;
560 uint32_t crtc_w = drm_rect_width(&plane_state->base.dst);
561 uint32_t crtc_h = drm_rect_height(&plane_state->base.dst);
562 uint32_t x = plane_state->main.x;
563 uint32_t y = plane_state->main.y;
564 uint32_t src_w = drm_rect_width(&plane_state->base.src) >> 16;
565 uint32_t src_h = drm_rect_height(&plane_state->base.src) >> 16;
566 unsigned long irqflags;
567
593 /* Sizes are 0 based */ 568 /* Sizes are 0 based */
594 src_w--; 569 src_w--;
595 src_h--; 570 src_h--;
@@ -599,16 +574,6 @@ ivb_update_plane(struct drm_plane *plane,
599 if (crtc_w != src_w || crtc_h != src_h) 574 if (crtc_w != src_w || crtc_h != src_h)
600 sprscale = SPRITE_SCALE_ENABLE | (src_w << 16) | src_h; 575 sprscale = SPRITE_SCALE_ENABLE | (src_w << 16) | src_h;
601 576
602 intel_add_fb_offsets(&x, &y, plane_state, 0);
603 sprsurf_offset = intel_compute_tile_offset(&x, &y, plane_state, 0);
604
605 /* HSW+ does this automagically in hardware */
606 if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv) &&
607 rotation & DRM_ROTATE_180) {
608 x += src_w;
609 y += src_h;
610 }
611
612 linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0); 577 linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
613 578
614 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 579 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
@@ -664,31 +629,20 @@ ivb_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
664 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 629 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
665} 630}
666 631
667static void 632static u32 ilk_sprite_ctl(const struct intel_crtc_state *crtc_state,
668ilk_update_plane(struct drm_plane *plane, 633 const struct intel_plane_state *plane_state)
669 const struct intel_crtc_state *crtc_state,
670 const struct intel_plane_state *plane_state)
671{ 634{
672 struct drm_device *dev = plane->dev; 635 struct drm_i915_private *dev_priv =
673 struct drm_i915_private *dev_priv = to_i915(dev); 636 to_i915(plane_state->base.plane->dev);
674 struct intel_plane *intel_plane = to_intel_plane(plane); 637 const struct drm_framebuffer *fb = plane_state->base.fb;
675 struct drm_framebuffer *fb = plane_state->base.fb;
676 int pipe = intel_plane->pipe;
677 u32 dvscntr, dvsscale;
678 u32 dvssurf_offset, linear_offset;
679 unsigned int rotation = plane_state->base.rotation; 638 unsigned int rotation = plane_state->base.rotation;
680 const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; 639 const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
681 int crtc_x = plane_state->base.dst.x1; 640 u32 dvscntr;
682 int crtc_y = plane_state->base.dst.y1;
683 uint32_t crtc_w = drm_rect_width(&plane_state->base.dst);
684 uint32_t crtc_h = drm_rect_height(&plane_state->base.dst);
685 uint32_t x = plane_state->base.src.x1 >> 16;
686 uint32_t y = plane_state->base.src.y1 >> 16;
687 uint32_t src_w = drm_rect_width(&plane_state->base.src) >> 16;
688 uint32_t src_h = drm_rect_height(&plane_state->base.src) >> 16;
689 unsigned long irqflags;
690 641
691 dvscntr = DVS_ENABLE; 642 dvscntr = DVS_ENABLE | DVS_GAMMA_ENABLE;
643
644 if (IS_GEN6(dev_priv))
645 dvscntr |= DVS_TRICKLE_FEED_DISABLE;
692 646
693 switch (fb->format->format) { 647 switch (fb->format->format) {
694 case DRM_FORMAT_XBGR8888: 648 case DRM_FORMAT_XBGR8888:
@@ -710,47 +664,57 @@ ilk_update_plane(struct drm_plane *plane,
710 dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_VYUY; 664 dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_VYUY;
711 break; 665 break;
712 default: 666 default:
713 BUG(); 667 MISSING_CASE(fb->format->format);
668 return 0;
714 } 669 }
715 670
716 /*
717 * Enable gamma to match primary/cursor plane behaviour.
718 * FIXME should be user controllable via propertiesa.
719 */
720 dvscntr |= DVS_GAMMA_ENABLE;
721
722 if (fb->modifier == I915_FORMAT_MOD_X_TILED) 671 if (fb->modifier == I915_FORMAT_MOD_X_TILED)
723 dvscntr |= DVS_TILED; 672 dvscntr |= DVS_TILED;
724 673
725 if (rotation & DRM_ROTATE_180) 674 if (rotation & DRM_ROTATE_180)
726 dvscntr |= DVS_ROTATE_180; 675 dvscntr |= DVS_ROTATE_180;
727 676
728 if (IS_GEN6(dev_priv))
729 dvscntr |= DVS_TRICKLE_FEED_DISABLE; /* must disable */
730
731 if (key->flags & I915_SET_COLORKEY_DESTINATION) 677 if (key->flags & I915_SET_COLORKEY_DESTINATION)
732 dvscntr |= DVS_DEST_KEY; 678 dvscntr |= DVS_DEST_KEY;
733 else if (key->flags & I915_SET_COLORKEY_SOURCE) 679 else if (key->flags & I915_SET_COLORKEY_SOURCE)
734 dvscntr |= DVS_SOURCE_KEY; 680 dvscntr |= DVS_SOURCE_KEY;
735 681
682 return dvscntr;
683}
684
685static void
686ilk_update_plane(struct drm_plane *plane,
687 const struct intel_crtc_state *crtc_state,
688 const struct intel_plane_state *plane_state)
689{
690 struct drm_device *dev = plane->dev;
691 struct drm_i915_private *dev_priv = to_i915(dev);
692 struct intel_plane *intel_plane = to_intel_plane(plane);
693 struct drm_framebuffer *fb = plane_state->base.fb;
694 int pipe = intel_plane->pipe;
695 u32 dvscntr = plane_state->ctl, dvsscale = 0;
696 u32 dvssurf_offset = plane_state->main.offset;
697 u32 linear_offset;
698 const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
699 int crtc_x = plane_state->base.dst.x1;
700 int crtc_y = plane_state->base.dst.y1;
701 uint32_t crtc_w = drm_rect_width(&plane_state->base.dst);
702 uint32_t crtc_h = drm_rect_height(&plane_state->base.dst);
703 uint32_t x = plane_state->main.x;
704 uint32_t y = plane_state->main.y;
705 uint32_t src_w = drm_rect_width(&plane_state->base.src) >> 16;
706 uint32_t src_h = drm_rect_height(&plane_state->base.src) >> 16;
707 unsigned long irqflags;
708
736 /* Sizes are 0 based */ 709 /* Sizes are 0 based */
737 src_w--; 710 src_w--;
738 src_h--; 711 src_h--;
739 crtc_w--; 712 crtc_w--;
740 crtc_h--; 713 crtc_h--;
741 714
742 dvsscale = 0;
743 if (crtc_w != src_w || crtc_h != src_h) 715 if (crtc_w != src_w || crtc_h != src_h)
744 dvsscale = DVS_SCALE_ENABLE | (src_w << 16) | src_h; 716 dvsscale = DVS_SCALE_ENABLE | (src_w << 16) | src_h;
745 717
746 intel_add_fb_offsets(&x, &y, plane_state, 0);
747 dvssurf_offset = intel_compute_tile_offset(&x, &y, plane_state, 0);
748
749 if (rotation & DRM_ROTATE_180) {
750 x += src_w;
751 y += src_h;
752 }
753
754 linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0); 718 linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
755 719
756 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 720 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
@@ -981,6 +945,26 @@ intel_check_sprite_plane(struct drm_plane *plane,
981 ret = skl_check_plane_surface(state); 945 ret = skl_check_plane_surface(state);
982 if (ret) 946 if (ret)
983 return ret; 947 return ret;
948
949 state->ctl = skl_plane_ctl(crtc_state, state);
950 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
951 ret = i9xx_check_plane_surface(state);
952 if (ret)
953 return ret;
954
955 state->ctl = vlv_sprite_ctl(crtc_state, state);
956 } else if (INTEL_GEN(dev_priv) >= 7) {
957 ret = i9xx_check_plane_surface(state);
958 if (ret)
959 return ret;
960
961 state->ctl = ivb_sprite_ctl(crtc_state, state);
962 } else {
963 ret = i9xx_check_plane_surface(state);
964 if (ret)
965 return ret;
966
967 state->ctl = ilk_sprite_ctl(crtc_state, state);
984 } 968 }
985 969
986 return 0; 970 return 0;
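The rework above follows one pattern for every platform: the control-register word (sprctl, dvscntr, plane_ctl) is derived once during atomic check, cached in plane_state->ctl, and the update_plane hooks become plain register writers that run under the uncore lock. A minimal sketch of that check/commit split, with all names hypothetical:

	#include <stdint.h>

	struct my_plane_state {
		uint32_t ctl;	/* precomputed control word */
		/* ... source/dest rectangles, fb, rotation ... */
	};

	/* check phase: may sleep, may fail, does all the policy work */
	static int my_plane_check(struct my_plane_state *s)
	{
		uint32_t ctl = 1u << 31;	/* hypothetical ENABLE bit */

		/* format/tiling/rotation/colorkey bits OR'd in here ... */
		s->ctl = ctl;
		return 0;
	}

	/* commit phase: atomic context, just latch what check computed */
	static void my_plane_commit(const struct my_plane_state *s,
				    volatile uint32_t *ctl_reg)
	{
		*ctl_reg = s->ctl;
	}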
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 6ed1a3ce47b7..e077c2a9e694 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1315,8 +1315,10 @@ static void intel_tv_find_better_format(struct drm_connector *connector)
1315 * Currently this always returns CONNECTOR_STATUS_UNKNOWN, as we need to be sure 1315 * Currently this always returns CONNECTOR_STATUS_UNKNOWN, as we need to be sure
1316 * we have a pipe programmed in order to probe the TV. 1316 * we have a pipe programmed in order to probe the TV.
1317 */ 1317 */
1318static enum drm_connector_status 1318static int
1319intel_tv_detect(struct drm_connector *connector, bool force) 1319intel_tv_detect(struct drm_connector *connector,
1320 struct drm_modeset_acquire_ctx *ctx,
1321 bool force)
1320{ 1322{
1321 struct drm_display_mode mode; 1323 struct drm_display_mode mode;
1322 struct intel_tv *intel_tv = intel_attached_tv(connector); 1324 struct intel_tv *intel_tv = intel_attached_tv(connector);
@@ -1331,21 +1333,20 @@ intel_tv_detect(struct drm_connector *connector, bool force)
1331 1333
1332 if (force) { 1334 if (force) {
1333 struct intel_load_detect_pipe tmp; 1335 struct intel_load_detect_pipe tmp;
1334 struct drm_modeset_acquire_ctx ctx; 1336 int ret;
1335 1337
1336 drm_modeset_acquire_init(&ctx, 0); 1338 ret = intel_get_load_detect_pipe(connector, &mode, &tmp, ctx);
1339 if (ret < 0)
1340 return ret;
1337 1341
1338 if (intel_get_load_detect_pipe(connector, &mode, &tmp, &ctx)) { 1342 if (ret > 0) {
1339 type = intel_tv_detect_type(intel_tv, connector); 1343 type = intel_tv_detect_type(intel_tv, connector);
1340 intel_release_load_detect_pipe(connector, &tmp, &ctx); 1344 intel_release_load_detect_pipe(connector, &tmp, ctx);
1341 status = type < 0 ? 1345 status = type < 0 ?
1342 connector_status_disconnected : 1346 connector_status_disconnected :
1343 connector_status_connected; 1347 connector_status_connected;
1344 } else 1348 } else
1345 status = connector_status_unknown; 1349 status = connector_status_unknown;
1346
1347 drm_modeset_drop_locks(&ctx);
1348 drm_modeset_acquire_fini(&ctx);
1349 } else 1350 } else
1350 return connector->status; 1351 return connector->status;
1351 1352
@@ -1516,7 +1517,6 @@ out:
1516 1517
1517static const struct drm_connector_funcs intel_tv_connector_funcs = { 1518static const struct drm_connector_funcs intel_tv_connector_funcs = {
1518 .dpms = drm_atomic_helper_connector_dpms, 1519 .dpms = drm_atomic_helper_connector_dpms,
1519 .detect = intel_tv_detect,
1520 .late_register = intel_connector_register, 1520 .late_register = intel_connector_register,
1521 .early_unregister = intel_connector_unregister, 1521 .early_unregister = intel_connector_unregister,
1522 .destroy = intel_tv_destroy, 1522 .destroy = intel_tv_destroy,
@@ -1528,6 +1528,7 @@ static const struct drm_connector_funcs intel_tv_connector_funcs = {
1528}; 1528};
1529 1529
1530static const struct drm_connector_helper_funcs intel_tv_connector_helper_funcs = { 1530static const struct drm_connector_helper_funcs intel_tv_connector_helper_funcs = {
1531 .detect_ctx = intel_tv_detect,
1531 .mode_valid = intel_tv_mode_valid, 1532 .mode_valid = intel_tv_mode_valid,
1532 .get_modes = intel_tv_get_modes, 1533 .get_modes = intel_tv_get_modes,
1533}; 1534};
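intel_tv_detect() moves from connector_funcs.detect to connector_helper_funcs.detect_ctx, so the DRM core now owns the drm_modeset_acquire_ctx and the deadlock backoff/retry handling; the hook returns either a connector_status value or a negative error (e.g. -EDEADLK to ask the core to retry). A skeleton of a new-style hook, with probe_hardware() a hypothetical helper:

	#include <drm/drm_connector.h>
	#include <drm/drm_modeset_lock.h>

	extern int probe_hardware(struct drm_connector *connector,
				  struct drm_modeset_acquire_ctx *ctx);

	static int my_detect_ctx(struct drm_connector *connector,
				 struct drm_modeset_acquire_ctx *ctx,
				 bool force)
	{
		int ret;

		if (!force)
			return connector->status;  /* cheap path, no probing */

		/* probing takes locks through 'ctx'; propagate negative
		 * errors so the core can back off and retry */
		ret = probe_hardware(connector, ctx);
		if (ret < 0)
			return ret;

		return ret ? connector_status_connected
			   : connector_status_disconnected;
	}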
diff --git a/drivers/gpu/drm/i915/intel_uc.c b/drivers/gpu/drm/i915/intel_uc.c
index d15a7d9d4eb0..c117424f1f50 100644
--- a/drivers/gpu/drm/i915/intel_uc.c
+++ b/drivers/gpu/drm/i915/intel_uc.c
@@ -26,6 +26,19 @@
26#include "intel_uc.h" 26#include "intel_uc.h"
27#include <linux/firmware.h> 27#include <linux/firmware.h>
28 28
29/* Cleans up uC firmware by releasing the firmware GEM obj.
30 */
31static void __intel_uc_fw_fini(struct intel_uc_fw *uc_fw)
32{
33 struct drm_i915_gem_object *obj;
34
35 obj = fetch_and_zero(&uc_fw->obj);
36 if (obj)
37 i915_gem_object_put(obj);
38
39 uc_fw->fetch_status = INTEL_UC_FIRMWARE_NONE;
40}
41
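fetch_and_zero() used above is i915's small exchange helper: it returns the old value of a variable and zeroes it in one expression, which makes "release once, forget the stale pointer" idioms hard to get wrong. It is defined in the driver's headers roughly as follows (plain loads and stores, not atomic, so it relies on the caller's locking):

	#define fetch_and_zero(ptr) ({					\
		typeof(*ptr) __T = *(ptr);				\
		*(ptr) = (typeof(*ptr))0;				\
		__T;							\
	})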
29/* Reset GuC providing us with fresh state for both GuC and HuC. 42/* Reset GuC providing us with fresh state for both GuC and HuC.
30 */ 43 */
31static int __intel_uc_reset_hw(struct drm_i915_private *dev_priv) 44static int __intel_uc_reset_hw(struct drm_i915_private *dev_priv)
@@ -83,23 +96,166 @@ void intel_uc_sanitize_options(struct drm_i915_private *dev_priv)
83 96
84void intel_uc_init_early(struct drm_i915_private *dev_priv) 97void intel_uc_init_early(struct drm_i915_private *dev_priv)
85{ 98{
86 mutex_init(&dev_priv->guc.send_mutex); 99 struct intel_guc *guc = &dev_priv->guc;
100
101 mutex_init(&guc->send_mutex);
102 guc->send = intel_guc_send_mmio;
103}
104
105static void fetch_uc_fw(struct drm_i915_private *dev_priv,
106 struct intel_uc_fw *uc_fw)
107{
108 struct pci_dev *pdev = dev_priv->drm.pdev;
109 struct drm_i915_gem_object *obj;
110 const struct firmware *fw = NULL;
111 struct uc_css_header *css;
112 size_t size;
113 int err;
114
115 if (!uc_fw->path)
116 return;
117
118 uc_fw->fetch_status = INTEL_UC_FIRMWARE_PENDING;
119
120 DRM_DEBUG_DRIVER("before requesting firmware: uC fw fetch status %s\n",
121 intel_uc_fw_status_repr(uc_fw->fetch_status));
122
123 err = request_firmware(&fw, uc_fw->path, &pdev->dev);
124 if (err)
125 goto fail;
126 if (!fw)
127 goto fail;
128
129 DRM_DEBUG_DRIVER("fetch uC fw from %s succeeded, fw %p\n",
130 uc_fw->path, fw);
131
132 /* Check the size of the blob before examining buffer contents */
133 if (fw->size < sizeof(struct uc_css_header)) {
134 DRM_NOTE("Firmware header is missing\n");
135 goto fail;
136 }
137
138 css = (struct uc_css_header *)fw->data;
139
140 /* Firmware bits always start from header */
141 uc_fw->header_offset = 0;
142 uc_fw->header_size = (css->header_size_dw - css->modulus_size_dw -
143 css->key_size_dw - css->exponent_size_dw) * sizeof(u32);
144
145 if (uc_fw->header_size != sizeof(struct uc_css_header)) {
146 DRM_NOTE("CSS header definition mismatch\n");
147 goto fail;
148 }
149
150 /* then, uCode */
151 uc_fw->ucode_offset = uc_fw->header_offset + uc_fw->header_size;
152 uc_fw->ucode_size = (css->size_dw - css->header_size_dw) * sizeof(u32);
153
154 /* now RSA */
155 if (css->key_size_dw != UOS_RSA_SCRATCH_MAX_COUNT) {
156 DRM_NOTE("RSA key size is bad\n");
157 goto fail;
158 }
159 uc_fw->rsa_offset = uc_fw->ucode_offset + uc_fw->ucode_size;
160 uc_fw->rsa_size = css->key_size_dw * sizeof(u32);
161
162 /* At least, it should have header, uCode and RSA. Size of all three. */
163 size = uc_fw->header_size + uc_fw->ucode_size + uc_fw->rsa_size;
164 if (fw->size < size) {
165 DRM_NOTE("Missing firmware components\n");
166 goto fail;
167 }
168
169 /*
170 * The GuC firmware image has the version number embedded at a
171 * well-known offset within the firmware blob; note that major / minor
172 * version are TWO bytes each (i.e. u16), although all pointers and
173 * offsets are defined in terms of bytes (u8).
174 */
175 switch (uc_fw->type) {
176 case INTEL_UC_FW_TYPE_GUC:
177 /* Header and uCode will be loaded to WOPCM. Size of the two. */
178 size = uc_fw->header_size + uc_fw->ucode_size;
179
180 /* Top 32k of WOPCM is reserved (8K stack + 24k RC6 context). */
181 if (size > intel_guc_wopcm_size(dev_priv)) {
182 DRM_ERROR("Firmware is too large to fit in WOPCM\n");
183 goto fail;
184 }
185 uc_fw->major_ver_found = css->guc.sw_version >> 16;
186 uc_fw->minor_ver_found = css->guc.sw_version & 0xFFFF;
187 break;
188
189 case INTEL_UC_FW_TYPE_HUC:
190 uc_fw->major_ver_found = css->huc.sw_version >> 16;
191 uc_fw->minor_ver_found = css->huc.sw_version & 0xFFFF;
192 break;
193
194 default:
195 DRM_ERROR("Unknown firmware type %d\n", uc_fw->type);
196 err = -ENOEXEC;
197 goto fail;
198 }
199
200 if (uc_fw->major_ver_wanted == 0 && uc_fw->minor_ver_wanted == 0) {
201 DRM_NOTE("Skipping %s firmware version check\n",
202 intel_uc_fw_type_repr(uc_fw->type));
203 } else if (uc_fw->major_ver_found != uc_fw->major_ver_wanted ||
204 uc_fw->minor_ver_found < uc_fw->minor_ver_wanted) {
205 DRM_NOTE("%s firmware version %d.%d, required %d.%d\n",
206 intel_uc_fw_type_repr(uc_fw->type),
207 uc_fw->major_ver_found, uc_fw->minor_ver_found,
208 uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted);
209 err = -ENOEXEC;
210 goto fail;
211 }
212
213 DRM_DEBUG_DRIVER("firmware version %d.%d OK (minimum %d.%d)\n",
214 uc_fw->major_ver_found, uc_fw->minor_ver_found,
215 uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted);
216
217 obj = i915_gem_object_create_from_data(dev_priv, fw->data, fw->size);
218 if (IS_ERR(obj)) {
219 err = PTR_ERR(obj);
220 goto fail;
221 }
222
223 uc_fw->obj = obj;
224 uc_fw->size = fw->size;
225
226 DRM_DEBUG_DRIVER("uC fw fetch status SUCCESS, obj %p\n",
227 uc_fw->obj);
228
229 release_firmware(fw);
230 uc_fw->fetch_status = INTEL_UC_FIRMWARE_SUCCESS;
231 return;
232
233fail:
234 DRM_WARN("Failed to fetch valid uC firmware from %s (error %d)\n",
235 uc_fw->path, err);
236 DRM_DEBUG_DRIVER("uC fw fetch status FAIL; err %d, fw %p, obj %p\n",
237 err, fw, uc_fw->obj);
238
239 release_firmware(fw); /* OK even if fw is NULL */
240 uc_fw->fetch_status = INTEL_UC_FIRMWARE_FAIL;
87} 241}
88 242
89void intel_uc_init_fw(struct drm_i915_private *dev_priv) 243void intel_uc_init_fw(struct drm_i915_private *dev_priv)
90{ 244{
91 if (dev_priv->huc.fw.path) 245 fetch_uc_fw(dev_priv, &dev_priv->huc.fw);
92 intel_uc_prepare_fw(dev_priv, &dev_priv->huc.fw); 246 fetch_uc_fw(dev_priv, &dev_priv->guc.fw);
247}
93 248
94 if (dev_priv->guc.fw.path) 249void intel_uc_fini_fw(struct drm_i915_private *dev_priv)
95 intel_uc_prepare_fw(dev_priv, &dev_priv->guc.fw); 250{
251 __intel_uc_fw_fini(&dev_priv->guc.fw);
252 __intel_uc_fw_fini(&dev_priv->huc.fw);
96} 253}
97 254
98int intel_uc_init_hw(struct drm_i915_private *dev_priv) 255int intel_uc_init_hw(struct drm_i915_private *dev_priv)
99{ 256{
100 int ret, attempts; 257 int ret, attempts;
101 258
102 /* GuC not enabled, nothing to do */
103 if (!i915.enable_guc_loading) 259 if (!i915.enable_guc_loading)
104 return 0; 260 return 0;
105 261
@@ -109,9 +265,13 @@ int intel_uc_init_hw(struct drm_i915_private *dev_priv)
109 i915_ggtt_enable_guc(dev_priv); 265 i915_ggtt_enable_guc(dev_priv);
110 266
111 if (i915.enable_guc_submission) { 267 if (i915.enable_guc_submission) {
268 /*
269 * This is stuff we need to have available at fw load time
270 * if we are planning to enable submission later
271 */
112 ret = i915_guc_submission_init(dev_priv); 272 ret = i915_guc_submission_init(dev_priv);
113 if (ret) 273 if (ret)
114 goto err; 274 goto err_guc;
115 } 275 }
116 276
117 /* WaEnableuKernelHeaderValidFix:skl */ 277 /* WaEnableuKernelHeaderValidFix:skl */
@@ -150,7 +310,7 @@ int intel_uc_init_hw(struct drm_i915_private *dev_priv)
150 310
151 ret = i915_guc_submission_enable(dev_priv); 311 ret = i915_guc_submission_enable(dev_priv);
152 if (ret) 312 if (ret)
153 goto err_submission; 313 goto err_interrupts;
154 } 314 }
155 315
156 return 0; 316 return 0;
@@ -164,11 +324,12 @@ int intel_uc_init_hw(struct drm_i915_private *dev_priv)
164 * nonfatal error (i.e. it doesn't prevent driver load, but 324 * nonfatal error (i.e. it doesn't prevent driver load, but
165 * marks the GPU as wedged until reset). 325 * marks the GPU as wedged until reset).
166 */ 326 */
327err_interrupts:
328 gen9_disable_guc_interrupts(dev_priv);
167err_submission: 329err_submission:
168 if (i915.enable_guc_submission) 330 if (i915.enable_guc_submission)
169 i915_guc_submission_fini(dev_priv); 331 i915_guc_submission_fini(dev_priv);
170 332err_guc:
171err:
172 i915_ggtt_disable_guc(dev_priv); 333 i915_ggtt_disable_guc(dev_priv);
173 334
174 DRM_ERROR("GuC init failed\n"); 335 DRM_ERROR("GuC init failed\n");
@@ -185,11 +346,24 @@ err:
185 return ret; 346 return ret;
186} 347}
187 348
349void intel_uc_fini_hw(struct drm_i915_private *dev_priv)
350{
351 if (!i915.enable_guc_loading)
352 return;
353
354 if (i915.enable_guc_submission) {
355 i915_guc_submission_disable(dev_priv);
356 gen9_disable_guc_interrupts(dev_priv);
357 i915_guc_submission_fini(dev_priv);
358 }
359 i915_ggtt_disable_guc(dev_priv);
360}
361
188/* 362/*
189 * Read GuC command/status register (SOFT_SCRATCH_0) 363 * Read GuC command/status register (SOFT_SCRATCH_0)
190 * Return true if it contains a response rather than a command 364 * Return true if it contains a response rather than a command
191 */ 365 */
192static bool intel_guc_recv(struct intel_guc *guc, u32 *status) 366static bool guc_recv(struct intel_guc *guc, u32 *status)
193{ 367{
194 struct drm_i915_private *dev_priv = guc_to_i915(guc); 368 struct drm_i915_private *dev_priv = guc_to_i915(guc);
195 369
@@ -198,7 +372,10 @@ static bool intel_guc_recv(struct intel_guc *guc, u32 *status)
198 return INTEL_GUC_RECV_IS_RESPONSE(val); 372 return INTEL_GUC_RECV_IS_RESPONSE(val);
199} 373}
200 374
201int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len) 375/*
376 * This function implements the MMIO based host to GuC interface.
377 */
378int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len)
202{ 379{
203 struct drm_i915_private *dev_priv = guc_to_i915(guc); 380 struct drm_i915_private *dev_priv = guc_to_i915(guc);
204 u32 status; 381 u32 status;
@@ -209,7 +386,7 @@ int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len)
209 return -EINVAL; 386 return -EINVAL;
210 387
211 mutex_lock(&guc->send_mutex); 388 mutex_lock(&guc->send_mutex);
212 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 389 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_BLITTER);
213 390
214 dev_priv->guc.action_count += 1; 391 dev_priv->guc.action_count += 1;
215 dev_priv->guc.action_cmd = action[0]; 392 dev_priv->guc.action_cmd = action[0];
@@ -226,9 +403,9 @@ int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len)
226 * up to that length of time, then switch to a slower sleep-wait loop. 403 * up to that length of time, then switch to a slower sleep-wait loop.
227 * No intel_guc_send command should ever take longer than 10ms. 404
228 */ 405 */
229 ret = wait_for_us(intel_guc_recv(guc, &status), 10); 406 ret = wait_for_us(guc_recv(guc, &status), 10);
230 if (ret) 407 if (ret)
231 ret = wait_for(intel_guc_recv(guc, &status), 10); 408 ret = wait_for(guc_recv(guc, &status), 10);
232 if (status != INTEL_GUC_STATUS_SUCCESS) { 409 if (status != INTEL_GUC_STATUS_SUCCESS) {
233 /* 410 /*
234 * Either the GuC explicitly returned an error (which 411 * Either the GuC explicitly returned an error (which
@@ -247,7 +424,7 @@ int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len)
247 } 424 }
248 dev_priv->guc.action_status = status; 425 dev_priv->guc.action_status = status;
249 426
250 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 427 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_BLITTER);
251 mutex_unlock(&guc->send_mutex); 428 mutex_unlock(&guc->send_mutex);
252 429
253 return ret; 430 return ret;
@@ -268,136 +445,3 @@ int intel_guc_sample_forcewake(struct intel_guc *guc)
268 445
269 return intel_guc_send(guc, action, ARRAY_SIZE(action)); 446 return intel_guc_send(guc, action, ARRAY_SIZE(action));
270} 447}
271
272void intel_uc_prepare_fw(struct drm_i915_private *dev_priv,
273 struct intel_uc_fw *uc_fw)
274{
275 struct pci_dev *pdev = dev_priv->drm.pdev;
276 struct drm_i915_gem_object *obj;
277 const struct firmware *fw = NULL;
278 struct uc_css_header *css;
279 size_t size;
280 int err;
281
282 uc_fw->fetch_status = INTEL_UC_FIRMWARE_PENDING;
283
284 DRM_DEBUG_DRIVER("before requesting firmware: uC fw fetch status %s\n",
285 intel_uc_fw_status_repr(uc_fw->fetch_status));
286
287 err = request_firmware(&fw, uc_fw->path, &pdev->dev);
288 if (err)
289 goto fail;
290 if (!fw)
291 goto fail;
292
293 DRM_DEBUG_DRIVER("fetch uC fw from %s succeeded, fw %p\n",
294 uc_fw->path, fw);
295
296 /* Check the size of the blob before examining buffer contents */
297 if (fw->size < sizeof(struct uc_css_header)) {
298 DRM_NOTE("Firmware header is missing\n");
299 goto fail;
300 }
301
302 css = (struct uc_css_header *)fw->data;
303
304 /* Firmware bits always start from header */
305 uc_fw->header_offset = 0;
306 uc_fw->header_size = (css->header_size_dw - css->modulus_size_dw -
307 css->key_size_dw - css->exponent_size_dw) * sizeof(u32);
308
309 if (uc_fw->header_size != sizeof(struct uc_css_header)) {
310 DRM_NOTE("CSS header definition mismatch\n");
311 goto fail;
312 }
313
314 /* then, uCode */
315 uc_fw->ucode_offset = uc_fw->header_offset + uc_fw->header_size;
316 uc_fw->ucode_size = (css->size_dw - css->header_size_dw) * sizeof(u32);
317
318 /* now RSA */
319 if (css->key_size_dw != UOS_RSA_SCRATCH_MAX_COUNT) {
320 DRM_NOTE("RSA key size is bad\n");
321 goto fail;
322 }
323 uc_fw->rsa_offset = uc_fw->ucode_offset + uc_fw->ucode_size;
324 uc_fw->rsa_size = css->key_size_dw * sizeof(u32);
325
326 /* At least, it should have header, uCode and RSA. Size of all three. */
327 size = uc_fw->header_size + uc_fw->ucode_size + uc_fw->rsa_size;
328 if (fw->size < size) {
329 DRM_NOTE("Missing firmware components\n");
330 goto fail;
331 }
332
333 /*
334 * The GuC firmware image has the version number embedded at a
335 * well-known offset within the firmware blob; note that major / minor
336 * version are TWO bytes each (i.e. u16), although all pointers and
337 * offsets are defined in terms of bytes (u8).
338 */
339 switch (uc_fw->type) {
340 case INTEL_UC_FW_TYPE_GUC:
341 /* Header and uCode will be loaded to WOPCM. Size of the two. */
342 size = uc_fw->header_size + uc_fw->ucode_size;
343
344 /* Top 32k of WOPCM is reserved (8K stack + 24k RC6 context). */
345 if (size > intel_guc_wopcm_size(dev_priv)) {
346 DRM_ERROR("Firmware is too large to fit in WOPCM\n");
347 goto fail;
348 }
349 uc_fw->major_ver_found = css->guc.sw_version >> 16;
350 uc_fw->minor_ver_found = css->guc.sw_version & 0xFFFF;
351 break;
352
353 case INTEL_UC_FW_TYPE_HUC:
354 uc_fw->major_ver_found = css->huc.sw_version >> 16;
355 uc_fw->minor_ver_found = css->huc.sw_version & 0xFFFF;
356 break;
357
358 default:
359 DRM_ERROR("Unknown firmware type %d\n", uc_fw->type);
360 err = -ENOEXEC;
361 goto fail;
362 }
363
364 if (uc_fw->major_ver_wanted == 0 && uc_fw->minor_ver_wanted == 0) {
365 DRM_NOTE("Skipping uC firmware version check\n");
366 } else if (uc_fw->major_ver_found != uc_fw->major_ver_wanted ||
367 uc_fw->minor_ver_found < uc_fw->minor_ver_wanted) {
368 DRM_NOTE("uC firmware version %d.%d, required %d.%d\n",
369 uc_fw->major_ver_found, uc_fw->minor_ver_found,
370 uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted);
371 err = -ENOEXEC;
372 goto fail;
373 }
374
375 DRM_DEBUG_DRIVER("firmware version %d.%d OK (minimum %d.%d)\n",
376 uc_fw->major_ver_found, uc_fw->minor_ver_found,
377 uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted);
378
379 obj = i915_gem_object_create_from_data(dev_priv, fw->data, fw->size);
380 if (IS_ERR(obj)) {
381 err = PTR_ERR(obj);
382 goto fail;
383 }
384
385 uc_fw->obj = obj;
386 uc_fw->size = fw->size;
387
388 DRM_DEBUG_DRIVER("uC fw fetch status SUCCESS, obj %p\n",
389 uc_fw->obj);
390
391 release_firmware(fw);
392 uc_fw->fetch_status = INTEL_UC_FIRMWARE_SUCCESS;
393 return;
394
395fail:
396 DRM_WARN("Failed to fetch valid uC firmware from %s (error %d)\n",
397 uc_fw->path, err);
398 DRM_DEBUG_DRIVER("uC fw fetch status FAIL; err %d, fw %p, obj %p\n",
399 err, fw, uc_fw->obj);
400
401 release_firmware(fw); /* OK even if fw is NULL */
402 uc_fw->fetch_status = INTEL_UC_FIRMWARE_FAIL;
403}
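intel_guc_send_mmio() above polls for completion with i915's wait_for_us()/wait_for() pair: a ~10us busy spin first, because almost every GuC command completes within that window, then a sleeping poll with a 10ms timeout as the slow path. A hedged sketch of that two-stage shape, with generic helpers standing in for the i915 macros:

	#include <stdbool.h>

	/* hypothetical primitives modelling wait_for_us()/wait_for() */
	extern bool response_ready(void);	/* the guc_recv() condition */
	extern bool spin_poll_us(bool (*cond)(void), unsigned int us);
	extern bool sleep_poll_ms(bool (*cond)(void), unsigned int ms);

	static int wait_for_guc_response(void)
	{
		/* fast path: busy-wait, safe in atomic context, ~10us */
		if (spin_poll_us(response_ready, 10))
			return 0;

		/* slow path: sleeping wait with a generous 10ms timeout */
		if (sleep_poll_ms(response_ready, 10))
			return 0;

		return -110;	/* -ETIMEDOUT */
	}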
diff --git a/drivers/gpu/drm/i915/intel_uc.h b/drivers/gpu/drm/i915/intel_uc.h
index a35ededfaa40..4b7f73aeddac 100644
--- a/drivers/gpu/drm/i915/intel_uc.h
+++ b/drivers/gpu/drm/i915/intel_uc.h
@@ -34,7 +34,9 @@ struct drm_i915_gem_request;
34 34
35/* 35/*
36 * This structure primarily describes the GEM object shared with the GuC. 36 * This structure primarily describes the GEM object shared with the GuC.
37 * The GEM object is held for the entire lifetime of our interaction with 37 * The specs sometimes refer to this object as a "GuC context", but we use
38 * the term "client" to avoid confusion with hardware contexts. This
39 * GEM object is held for the entire lifetime of our interaction with
38 * the GuC, being allocated before the GuC is loaded with its firmware. 40 * the GuC, being allocated before the GuC is loaded with its firmware.
39 * Because there's no way to update the address used by the GuC after 41 * Because there's no way to update the address used by the GuC after
40 * initialisation, the shared object must stay pinned into the GGTT as 42 * initialisation, the shared object must stay pinned into the GGTT as
@@ -44,7 +46,7 @@ struct drm_i915_gem_request;
44 * 46 *
45 * The single GEM object described here is actually made up of several 47 * The single GEM object described here is actually made up of several
46 * separate areas, as far as the GuC is concerned. The first page (kept 48 * separate areas, as far as the GuC is concerned. The first page (kept
47 * kmap'd) includes the "process decriptor" which holds sequence data for 49 * kmap'd) includes the "process descriptor" which holds sequence data for
48 * the doorbell, and one cacheline which actually *is* the doorbell; a 50 * the doorbell, and one cacheline which actually *is* the doorbell; a
49 * write to this will "ring the doorbell" (i.e. send an interrupt to the 51 * write to this will "ring the doorbell" (i.e. send an interrupt to the
50 * GuC). The subsequent pages of the client object constitute the work 52 * GuC). The subsequent pages of the client object constitute the work
@@ -72,13 +74,12 @@ struct i915_guc_client {
72 74
73 uint32_t engines; /* bitmap of (host) engine ids */ 75 uint32_t engines; /* bitmap of (host) engine ids */
74 uint32_t priority; 76 uint32_t priority;
75 uint32_t ctx_index; 77 u32 stage_id;
76 uint32_t proc_desc_offset; 78 uint32_t proc_desc_offset;
77 79
78 uint32_t doorbell_offset; 80 u16 doorbell_id;
79 uint32_t doorbell_cookie; 81 unsigned long doorbell_offset;
80 uint16_t doorbell_id; 82 u32 doorbell_cookie;
81 uint16_t padding[3]; /* Maintain alignment */
82 83
83 spinlock_t wq_lock; 84 spinlock_t wq_lock;
84 uint32_t wq_offset; 85 uint32_t wq_offset;
@@ -100,11 +101,40 @@ enum intel_uc_fw_status {
100 INTEL_UC_FIRMWARE_SUCCESS 101 INTEL_UC_FIRMWARE_SUCCESS
101}; 102};
102 103
104/* User-friendly representation of an enum */
105static inline
106const char *intel_uc_fw_status_repr(enum intel_uc_fw_status status)
107{
108 switch (status) {
109 case INTEL_UC_FIRMWARE_FAIL:
110 return "FAIL";
111 case INTEL_UC_FIRMWARE_NONE:
112 return "NONE";
113 case INTEL_UC_FIRMWARE_PENDING:
114 return "PENDING";
115 case INTEL_UC_FIRMWARE_SUCCESS:
116 return "SUCCESS";
117 }
118 return "<invalid>";
119}
120
103enum intel_uc_fw_type { 121enum intel_uc_fw_type {
104 INTEL_UC_FW_TYPE_GUC, 122 INTEL_UC_FW_TYPE_GUC,
105 INTEL_UC_FW_TYPE_HUC 123 INTEL_UC_FW_TYPE_HUC
106}; 124};
107 125
126/* User-friendly representation of an enum */
127static inline const char *intel_uc_fw_type_repr(enum intel_uc_fw_type type)
128{
129 switch (type) {
130 case INTEL_UC_FW_TYPE_GUC:
131 return "GuC";
132 case INTEL_UC_FW_TYPE_HUC:
133 return "HuC";
134 }
135 return "uC";
136}
137
108/* 138/*
109 * This structure encapsulates all the data needed during the process 139 * This structure encapsulates all the data needed during the process
110 * of fetching, caching, and loading the firmware image into the GuC. 140 * of fetching, caching, and loading the firmware image into the GuC.
@@ -133,11 +163,13 @@ struct intel_uc_fw {
133struct intel_guc_log { 163struct intel_guc_log {
134 uint32_t flags; 164 uint32_t flags;
135 struct i915_vma *vma; 165 struct i915_vma *vma;
136	void *buf_addr; 166	/* The runtime state is created only when GuC logging is enabled */
137 struct workqueue_struct *flush_wq; 167 struct {
138 struct work_struct flush_work; 168 void *buf_addr;
139 struct rchan *relay_chan; 169 struct workqueue_struct *flush_wq;
140 170 struct work_struct flush_work;
171 struct rchan *relay_chan;
172 } runtime;
141 /* logging related stats */ 173 /* logging related stats */
142 u32 capture_miss_count; 174 u32 capture_miss_count;
143 u32 flush_interrupt_count; 175 u32 flush_interrupt_count;
@@ -154,12 +186,13 @@ struct intel_guc {
154 bool interrupts_enabled; 186 bool interrupts_enabled;
155 187
156 struct i915_vma *ads_vma; 188 struct i915_vma *ads_vma;
157 struct i915_vma *ctx_pool_vma; 189 struct i915_vma *stage_desc_pool;
158 struct ida ctx_ids; 190 void *stage_desc_pool_vaddr;
191 struct ida stage_ids;
159 192
160 struct i915_guc_client *execbuf_client; 193 struct i915_guc_client *execbuf_client;
161 194
162 DECLARE_BITMAP(doorbell_bitmap, GUC_MAX_DOORBELLS); 195 DECLARE_BITMAP(doorbell_bitmap, GUC_NUM_DOORBELLS);
163 uint32_t db_cacheline; /* Cyclic counter mod pagesize */ 196 uint32_t db_cacheline; /* Cyclic counter mod pagesize */
164 197
165 /* Action status & statistics */ 198 /* Action status & statistics */
@@ -174,6 +207,9 @@ struct intel_guc {
174 207
175 /* To serialize the intel_guc_send actions */ 208 /* To serialize the intel_guc_send actions */
176 struct mutex send_mutex; 209 struct mutex send_mutex;
210
211 /* GuC's FW specific send function */
212 int (*send)(struct intel_guc *guc, const u32 *data, u32 len);
177}; 213};
178 214
179struct intel_huc { 215struct intel_huc {
@@ -187,17 +223,19 @@ struct intel_huc {
187void intel_uc_sanitize_options(struct drm_i915_private *dev_priv); 223void intel_uc_sanitize_options(struct drm_i915_private *dev_priv);
188void intel_uc_init_early(struct drm_i915_private *dev_priv); 224void intel_uc_init_early(struct drm_i915_private *dev_priv);
189void intel_uc_init_fw(struct drm_i915_private *dev_priv); 225void intel_uc_init_fw(struct drm_i915_private *dev_priv);
226void intel_uc_fini_fw(struct drm_i915_private *dev_priv);
190int intel_uc_init_hw(struct drm_i915_private *dev_priv); 227int intel_uc_init_hw(struct drm_i915_private *dev_priv);
191void intel_uc_prepare_fw(struct drm_i915_private *dev_priv, 228void intel_uc_fini_hw(struct drm_i915_private *dev_priv);
192 struct intel_uc_fw *uc_fw);
193int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len);
194int intel_guc_sample_forcewake(struct intel_guc *guc); 229int intel_guc_sample_forcewake(struct intel_guc *guc);
230int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len);
231static inline int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len)
232{
233 return guc->send(guc, action, len);
234}
195 235
196/* intel_guc_loader.c */ 236/* intel_guc_loader.c */
197int intel_guc_select_fw(struct intel_guc *guc); 237int intel_guc_select_fw(struct intel_guc *guc);
198int intel_guc_init_hw(struct intel_guc *guc); 238int intel_guc_init_hw(struct intel_guc *guc);
199void intel_guc_fini(struct drm_i915_private *dev_priv);
200const char *intel_uc_fw_status_repr(enum intel_uc_fw_status status);
201int intel_guc_suspend(struct drm_i915_private *dev_priv); 239int intel_guc_suspend(struct drm_i915_private *dev_priv);
202int intel_guc_resume(struct drm_i915_private *dev_priv); 240int intel_guc_resume(struct drm_i915_private *dev_priv);
203u32 intel_guc_wopcm_size(struct drm_i915_private *dev_priv); 241u32 intel_guc_wopcm_size(struct drm_i915_private *dev_priv);
@@ -212,10 +250,11 @@ void i915_guc_submission_fini(struct drm_i915_private *dev_priv);
212struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size); 250struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size);
213 251
214/* intel_guc_log.c */ 252/* intel_guc_log.c */
215void intel_guc_log_create(struct intel_guc *guc); 253int intel_guc_log_create(struct intel_guc *guc);
254void intel_guc_log_destroy(struct intel_guc *guc);
255int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val);
216void i915_guc_log_register(struct drm_i915_private *dev_priv); 256void i915_guc_log_register(struct drm_i915_private *dev_priv);
217void i915_guc_log_unregister(struct drm_i915_private *dev_priv); 257void i915_guc_log_unregister(struct drm_i915_private *dev_priv);
218int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val);
219 258
220static inline u32 guc_ggtt_offset(struct i915_vma *vma) 259static inline u32 guc_ggtt_offset(struct i915_vma *vma)
221{ 260{
@@ -227,7 +266,6 @@ static inline u32 guc_ggtt_offset(struct i915_vma *vma)
227 266
228/* intel_huc.c */ 267/* intel_huc.c */
229void intel_huc_select_fw(struct intel_huc *huc); 268void intel_huc_select_fw(struct intel_huc *huc);
230void intel_huc_fini(struct drm_i915_private *dev_priv);
231int intel_huc_init_hw(struct intel_huc *huc); 269int intel_huc_init_hw(struct intel_huc *huc);
232void intel_guc_auth_huc(struct drm_i915_private *dev_priv); 270void intel_guc_auth_huc(struct drm_i915_private *dev_priv);
233 271
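
The header now routes every host-to-GuC action through a per-firmware send hook (only intel_guc_send_mmio is declared here; a different transport can be slotted in later without touching callers). A toy sketch of that dispatch pattern, with illustrative names:

#include <stdint.h>

typedef uint32_t u32;

/* Toy model of the guc->send indirection. */
struct toy_guc {
	int (*send)(struct toy_guc *guc, const u32 *action, u32 len);
};

static int toy_send_mmio(struct toy_guc *guc, const u32 *action, u32 len)
{
	(void)guc; (void)action; (void)len;
	/* write action[0..len) to scratch registers, poll for the ack ... */
	return 0;
}

static int toy_guc_send(struct toy_guc *guc, const u32 *action, u32 len)
{
	return guc->send(guc, action, len);	/* single indirect call site */
}

/* Setup picks the transport once, e.g. guc.send = toy_send_mmio; callers
 * never need to know which backend is live. */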
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 09f5f02d7901..6d1ea26b2493 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -52,10 +52,10 @@ intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id)
52} 52}
53 53
54static inline void 54static inline void
55fw_domain_reset(const struct intel_uncore_forcewake_domain *d) 55fw_domain_reset(struct drm_i915_private *i915,
56 const struct intel_uncore_forcewake_domain *d)
56{ 57{
57 WARN_ON(!i915_mmio_reg_valid(d->reg_set)); 58 __raw_i915_write32(i915, d->reg_set, i915->uncore.fw_reset);
58 __raw_i915_write32(d->i915, d->reg_set, d->val_reset);
59} 59}
60 60
61static inline void 61static inline void
@@ -69,9 +69,10 @@ fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
69} 69}
70 70
71static inline void 71static inline void
72fw_domain_wait_ack_clear(const struct intel_uncore_forcewake_domain *d) 72fw_domain_wait_ack_clear(const struct drm_i915_private *i915,
73 const struct intel_uncore_forcewake_domain *d)
73{ 74{
74 if (wait_for_atomic((__raw_i915_read32(d->i915, d->reg_ack) & 75 if (wait_for_atomic((__raw_i915_read32(i915, d->reg_ack) &
75 FORCEWAKE_KERNEL) == 0, 76 FORCEWAKE_KERNEL) == 0,
76 FORCEWAKE_ACK_TIMEOUT_MS)) 77 FORCEWAKE_ACK_TIMEOUT_MS))
77 DRM_ERROR("%s: timed out waiting for forcewake ack to clear.\n", 78 DRM_ERROR("%s: timed out waiting for forcewake ack to clear.\n",
@@ -79,15 +80,17 @@ fw_domain_wait_ack_clear(const struct intel_uncore_forcewake_domain *d)
79} 80}
80 81
81static inline void 82static inline void
82fw_domain_get(const struct intel_uncore_forcewake_domain *d) 83fw_domain_get(struct drm_i915_private *i915,
84 const struct intel_uncore_forcewake_domain *d)
83{ 85{
84 __raw_i915_write32(d->i915, d->reg_set, d->val_set); 86 __raw_i915_write32(i915, d->reg_set, i915->uncore.fw_set);
85} 87}
86 88
87static inline void 89static inline void
88fw_domain_wait_ack(const struct intel_uncore_forcewake_domain *d) 90fw_domain_wait_ack(const struct drm_i915_private *i915,
91 const struct intel_uncore_forcewake_domain *d)
89{ 92{
90 if (wait_for_atomic((__raw_i915_read32(d->i915, d->reg_ack) & 93 if (wait_for_atomic((__raw_i915_read32(i915, d->reg_ack) &
91 FORCEWAKE_KERNEL), 94 FORCEWAKE_KERNEL),
92 FORCEWAKE_ACK_TIMEOUT_MS)) 95 FORCEWAKE_ACK_TIMEOUT_MS))
93 DRM_ERROR("%s: timed out waiting for forcewake ack request.\n", 96 DRM_ERROR("%s: timed out waiting for forcewake ack request.\n",
@@ -95,72 +98,59 @@ fw_domain_wait_ack(const struct intel_uncore_forcewake_domain *d)
95} 98}
96 99
97static inline void 100static inline void
98fw_domain_put(const struct intel_uncore_forcewake_domain *d) 101fw_domain_put(const struct drm_i915_private *i915,
102 const struct intel_uncore_forcewake_domain *d)
99{ 103{
100 __raw_i915_write32(d->i915, d->reg_set, d->val_clear); 104 __raw_i915_write32(i915, d->reg_set, i915->uncore.fw_clear);
101}
102
103static inline void
104fw_domain_posting_read(const struct intel_uncore_forcewake_domain *d)
105{
106 /* something from same cacheline, but not from the set register */
107 if (i915_mmio_reg_valid(d->reg_post))
108 __raw_posting_read(d->i915, d->reg_post);
109} 105}
110 106
111static void 107static void
112fw_domains_get(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains) 108fw_domains_get(struct drm_i915_private *i915, enum forcewake_domains fw_domains)
113{ 109{
114 struct intel_uncore_forcewake_domain *d; 110 struct intel_uncore_forcewake_domain *d;
111 unsigned int tmp;
115 112
116 for_each_fw_domain_masked(d, fw_domains, dev_priv) { 113 GEM_BUG_ON(fw_domains & ~i915->uncore.fw_domains);
117 fw_domain_wait_ack_clear(d); 114
118 fw_domain_get(d); 115 for_each_fw_domain_masked(d, fw_domains, i915, tmp) {
116 fw_domain_wait_ack_clear(i915, d);
117 fw_domain_get(i915, d);
119 } 118 }
120 119
121 for_each_fw_domain_masked(d, fw_domains, dev_priv) 120 for_each_fw_domain_masked(d, fw_domains, i915, tmp)
122 fw_domain_wait_ack(d); 121 fw_domain_wait_ack(i915, d);
123 122
124 dev_priv->uncore.fw_domains_active |= fw_domains; 123 i915->uncore.fw_domains_active |= fw_domains;
125} 124}
126 125
127static void 126static void
128fw_domains_put(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains) 127fw_domains_put(struct drm_i915_private *i915, enum forcewake_domains fw_domains)
129{ 128{
130 struct intel_uncore_forcewake_domain *d; 129 struct intel_uncore_forcewake_domain *d;
130 unsigned int tmp;
131 131
132 for_each_fw_domain_masked(d, fw_domains, dev_priv) { 132 GEM_BUG_ON(fw_domains & ~i915->uncore.fw_domains);
133 fw_domain_put(d);
134 fw_domain_posting_read(d);
135 }
136
137 dev_priv->uncore.fw_domains_active &= ~fw_domains;
138}
139 133
140static void 134 for_each_fw_domain_masked(d, fw_domains, i915, tmp)
141fw_domains_posting_read(struct drm_i915_private *dev_priv) 135 fw_domain_put(i915, d);
142{
143 struct intel_uncore_forcewake_domain *d;
144 136
145 /* No need to do for all, just do for first found */ 137 i915->uncore.fw_domains_active &= ~fw_domains;
146 for_each_fw_domain(d, dev_priv) {
147 fw_domain_posting_read(d);
148 break;
149 }
150} 138}
151 139
152static void 140static void
153fw_domains_reset(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains) 141fw_domains_reset(struct drm_i915_private *i915,
142 enum forcewake_domains fw_domains)
154{ 143{
155 struct intel_uncore_forcewake_domain *d; 144 struct intel_uncore_forcewake_domain *d;
145 unsigned int tmp;
156 146
157 if (dev_priv->uncore.fw_domains == 0) 147 if (!fw_domains)
158 return; 148 return;
159 149
160 for_each_fw_domain_masked(d, fw_domains, dev_priv) 150 GEM_BUG_ON(fw_domains & ~i915->uncore.fw_domains);
161 fw_domain_reset(d);
162 151
163 fw_domains_posting_read(dev_priv); 152 for_each_fw_domain_masked(d, fw_domains, i915, tmp)
153 fw_domain_reset(i915, d);
164} 154}
165 155
166static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv) 156static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
@@ -236,7 +226,8 @@ intel_uncore_fw_release_timer(struct hrtimer *timer)
236{ 226{
237 struct intel_uncore_forcewake_domain *domain = 227 struct intel_uncore_forcewake_domain *domain =
238 container_of(timer, struct intel_uncore_forcewake_domain, timer); 228 container_of(timer, struct intel_uncore_forcewake_domain, timer);
239 struct drm_i915_private *dev_priv = domain->i915; 229 struct drm_i915_private *dev_priv =
230 container_of(domain, struct drm_i915_private, uncore.fw_domain[domain->id]);
240 unsigned long irqflags; 231 unsigned long irqflags;
241 232
242 assert_rpm_device_not_suspended(dev_priv); 233 assert_rpm_device_not_suspended(dev_priv);
@@ -266,9 +257,11 @@ static void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv,
266 * timers are run before holding. 257 * timers are run before holding.
267 */ 258 */
268 while (1) { 259 while (1) {
260 unsigned int tmp;
261
269 active_domains = 0; 262 active_domains = 0;
270 263
271 for_each_fw_domain(domain, dev_priv) { 264 for_each_fw_domain(domain, dev_priv, tmp) {
272 if (hrtimer_cancel(&domain->timer) == 0) 265 if (hrtimer_cancel(&domain->timer) == 0)
273 continue; 266 continue;
274 267
@@ -277,7 +270,7 @@ static void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv,
277 270
278 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 271 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
279 272
280 for_each_fw_domain(domain, dev_priv) { 273 for_each_fw_domain(domain, dev_priv, tmp) {
281 if (hrtimer_active(&domain->timer)) 274 if (hrtimer_active(&domain->timer))
282 active_domains |= domain->mask; 275 active_domains |= domain->mask;
283 } 276 }
@@ -300,7 +293,7 @@ static void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv,
300 if (fw) 293 if (fw)
301 dev_priv->uncore.funcs.force_wake_put(dev_priv, fw); 294 dev_priv->uncore.funcs.force_wake_put(dev_priv, fw);
302 295
303 fw_domains_reset(dev_priv, FORCEWAKE_ALL); 296 fw_domains_reset(dev_priv, dev_priv->uncore.fw_domains);
304 297
305 if (restore) { /* If reset with a user forcewake, try to restore */ 298 if (restore) { /* If reset with a user forcewake, try to restore */
306 if (fw) 299 if (fw)
@@ -457,13 +450,13 @@ static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
457 enum forcewake_domains fw_domains) 450 enum forcewake_domains fw_domains)
458{ 451{
459 struct intel_uncore_forcewake_domain *domain; 452 struct intel_uncore_forcewake_domain *domain;
453 unsigned int tmp;
460 454
461 fw_domains &= dev_priv->uncore.fw_domains; 455 fw_domains &= dev_priv->uncore.fw_domains;
462 456
463 for_each_fw_domain_masked(domain, fw_domains, dev_priv) { 457 for_each_fw_domain_masked(domain, fw_domains, dev_priv, tmp)
464 if (domain->wake_count++) 458 if (domain->wake_count++)
465 fw_domains &= ~domain->mask; 459 fw_domains &= ~domain->mask;
466 }
467 460
468 if (fw_domains) 461 if (fw_domains)
469 dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains); 462 dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
@@ -520,10 +513,11 @@ static void __intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
520 enum forcewake_domains fw_domains) 513 enum forcewake_domains fw_domains)
521{ 514{
522 struct intel_uncore_forcewake_domain *domain; 515 struct intel_uncore_forcewake_domain *domain;
516 unsigned int tmp;
523 517
524 fw_domains &= dev_priv->uncore.fw_domains; 518 fw_domains &= dev_priv->uncore.fw_domains;
525 519
526 for_each_fw_domain_masked(domain, fw_domains, dev_priv) { 520 for_each_fw_domain_masked(domain, fw_domains, dev_priv, tmp) {
527 if (WARN_ON(domain->wake_count == 0)) 521 if (WARN_ON(domain->wake_count == 0))
528 continue; 522 continue;
529 523
@@ -928,8 +922,11 @@ static noinline void ___force_wake_auto(struct drm_i915_private *dev_priv,
928 enum forcewake_domains fw_domains) 922 enum forcewake_domains fw_domains)
929{ 923{
930 struct intel_uncore_forcewake_domain *domain; 924 struct intel_uncore_forcewake_domain *domain;
925 unsigned int tmp;
926
927 GEM_BUG_ON(fw_domains & ~dev_priv->uncore.fw_domains);
931 928
932 for_each_fw_domain_masked(domain, fw_domains, dev_priv) 929 for_each_fw_domain_masked(domain, fw_domains, dev_priv, tmp)
933 fw_domain_arm_timer(domain); 930 fw_domain_arm_timer(domain);
934 931
935 dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains); 932 dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
@@ -1141,41 +1138,27 @@ static void fw_domain_init(struct drm_i915_private *dev_priv,
1141 1138
1142 WARN_ON(d->wake_count); 1139 WARN_ON(d->wake_count);
1143 1140
1141 WARN_ON(!i915_mmio_reg_valid(reg_set));
1142 WARN_ON(!i915_mmio_reg_valid(reg_ack));
1143
1144 d->wake_count = 0; 1144 d->wake_count = 0;
1145 d->reg_set = reg_set; 1145 d->reg_set = reg_set;
1146 d->reg_ack = reg_ack; 1146 d->reg_ack = reg_ack;
1147 1147
1148 if (IS_GEN6(dev_priv)) {
1149 d->val_reset = 0;
1150 d->val_set = FORCEWAKE_KERNEL;
1151 d->val_clear = 0;
1152 } else {
1153 /* WaRsClearFWBitsAtReset:bdw,skl */
1154 d->val_reset = _MASKED_BIT_DISABLE(0xffff);
1155 d->val_set = _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL);
1156 d->val_clear = _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL);
1157 }
1158
1159 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1160 d->reg_post = FORCEWAKE_ACK_VLV;
1161 else if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv) || IS_GEN8(dev_priv))
1162 d->reg_post = ECOBUS;
1163
1164 d->i915 = dev_priv;
1165 d->id = domain_id; 1148 d->id = domain_id;
1166 1149
1167 BUILD_BUG_ON(FORCEWAKE_RENDER != (1 << FW_DOMAIN_ID_RENDER)); 1150 BUILD_BUG_ON(FORCEWAKE_RENDER != (1 << FW_DOMAIN_ID_RENDER));
1168 BUILD_BUG_ON(FORCEWAKE_BLITTER != (1 << FW_DOMAIN_ID_BLITTER)); 1151 BUILD_BUG_ON(FORCEWAKE_BLITTER != (1 << FW_DOMAIN_ID_BLITTER));
1169 BUILD_BUG_ON(FORCEWAKE_MEDIA != (1 << FW_DOMAIN_ID_MEDIA)); 1152 BUILD_BUG_ON(FORCEWAKE_MEDIA != (1 << FW_DOMAIN_ID_MEDIA));
1170 1153
1171 d->mask = 1 << domain_id; 1154 d->mask = BIT(domain_id);
1172 1155
1173 hrtimer_init(&d->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 1156 hrtimer_init(&d->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1174 d->timer.function = intel_uncore_fw_release_timer; 1157 d->timer.function = intel_uncore_fw_release_timer;
1175 1158
1176 dev_priv->uncore.fw_domains |= (1 << domain_id); 1159 dev_priv->uncore.fw_domains |= BIT(domain_id);
1177 1160
1178 fw_domain_reset(d); 1161 fw_domain_reset(dev_priv, d);
1179} 1162}
1180 1163
1181static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv) 1164static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
@@ -1183,6 +1166,17 @@ static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
1183 if (INTEL_GEN(dev_priv) <= 5 || intel_vgpu_active(dev_priv)) 1166 if (INTEL_GEN(dev_priv) <= 5 || intel_vgpu_active(dev_priv))
1184 return; 1167 return;
1185 1168
1169 if (IS_GEN6(dev_priv)) {
1170 dev_priv->uncore.fw_reset = 0;
1171 dev_priv->uncore.fw_set = FORCEWAKE_KERNEL;
1172 dev_priv->uncore.fw_clear = 0;
1173 } else {
1174 /* WaRsClearFWBitsAtReset:bdw,skl */
1175 dev_priv->uncore.fw_reset = _MASKED_BIT_DISABLE(0xffff);
1176 dev_priv->uncore.fw_set = _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL);
1177 dev_priv->uncore.fw_clear = _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL);
1178 }
1179
1186 if (IS_GEN9(dev_priv)) { 1180 if (IS_GEN9(dev_priv)) {
1187 dev_priv->uncore.funcs.force_wake_get = fw_domains_get; 1181 dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
1188 dev_priv->uncore.funcs.force_wake_put = fw_domains_put; 1182 dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
@@ -1246,9 +1240,9 @@ static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
1246 FORCEWAKE_MT, FORCEWAKE_MT_ACK); 1240 FORCEWAKE_MT, FORCEWAKE_MT_ACK);
1247 1241
1248 spin_lock_irq(&dev_priv->uncore.lock); 1242 spin_lock_irq(&dev_priv->uncore.lock);
1249 fw_domains_get_with_thread_status(dev_priv, FORCEWAKE_ALL); 1243 fw_domains_get_with_thread_status(dev_priv, FORCEWAKE_RENDER);
1250 ecobus = __raw_i915_read32(dev_priv, ECOBUS); 1244 ecobus = __raw_i915_read32(dev_priv, ECOBUS);
1251 fw_domains_put_with_fifo(dev_priv, FORCEWAKE_ALL); 1245 fw_domains_put_with_fifo(dev_priv, FORCEWAKE_RENDER);
1252 spin_unlock_irq(&dev_priv->uncore.lock); 1246 spin_unlock_irq(&dev_priv->uncore.lock);
1253 1247
1254 if (!(ecobus & FORCEWAKE_MT_ENABLE)) { 1248 if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_request.c b/drivers/gpu/drm/i915/selftests/i915_gem_request.c
index 926b24c117d6..98b7aac41eec 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_request.c
@@ -291,8 +291,6 @@ static int begin_live_test(struct live_test *t,
291 return err; 291 return err;
292 } 292 }
293 293
294 i915_gem_retire_requests(i915);
295
296 i915->gpu_error.missed_irq_rings = 0; 294 i915->gpu_error.missed_irq_rings = 0;
297 t->reset_count = i915_reset_count(&i915->gpu_error); 295 t->reset_count = i915_reset_count(&i915->gpu_error);
298 296
@@ -303,7 +301,9 @@ static int end_live_test(struct live_test *t)
303{ 301{
304 struct drm_i915_private *i915 = t->i915; 302 struct drm_i915_private *i915 = t->i915;
305 303
306 if (wait_for(intel_engines_are_idle(i915), 1)) { 304 i915_gem_retire_requests(i915);
305
306 if (wait_for(intel_engines_are_idle(i915), 10)) {
307 pr_err("%s(%s): GPU not idle\n", t->func, t->name); 307 pr_err("%s(%s): GPU not idle\n", t->func, t->name);
308 return -EIO; 308 return -EIO;
309 } 309 }
diff --git a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
index 6ec7c731a267..aa31d6c0cdfb 100644
--- a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
@@ -235,7 +235,6 @@ static void hang_fini(struct hang *h)
235 i915_gem_object_put(h->hws); 235 i915_gem_object_put(h->hws);
236 236
237 i915_gem_wait_for_idle(h->i915, I915_WAIT_LOCKED); 237 i915_gem_wait_for_idle(h->i915, I915_WAIT_LOCKED);
238 i915_gem_retire_requests(h->i915);
239} 238}
240 239
241static int igt_hang_sanitycheck(void *arg) 240static int igt_hang_sanitycheck(void *arg)
diff --git a/drivers/gpu/drm/i915/selftests/mock_dmabuf.c b/drivers/gpu/drm/i915/selftests/mock_dmabuf.c
index 99da8f4ef497..302f7d103635 100644
--- a/drivers/gpu/drm/i915/selftests/mock_dmabuf.c
+++ b/drivers/gpu/drm/i915/selftests/mock_dmabuf.c
@@ -129,10 +129,10 @@ static const struct dma_buf_ops mock_dmabuf_ops = {
129 .map_dma_buf = mock_map_dma_buf, 129 .map_dma_buf = mock_map_dma_buf,
130 .unmap_dma_buf = mock_unmap_dma_buf, 130 .unmap_dma_buf = mock_unmap_dma_buf,
131 .release = mock_dmabuf_release, 131 .release = mock_dmabuf_release,
132 .kmap = mock_dmabuf_kmap, 132 .map = mock_dmabuf_kmap,
133 .kmap_atomic = mock_dmabuf_kmap_atomic, 133 .map_atomic = mock_dmabuf_kmap_atomic,
134 .kunmap = mock_dmabuf_kunmap, 134 .unmap = mock_dmabuf_kunmap,
135 .kunmap_atomic = mock_dmabuf_kunmap_atomic, 135 .unmap_atomic = mock_dmabuf_kunmap_atomic,
136 .mmap = mock_dmabuf_mmap, 136 .mmap = mock_dmabuf_mmap,
137 .vmap = mock_dmabuf_vmap, 137 .vmap = mock_dmabuf_vmap,
138 .vunmap = mock_dmabuf_vunmap, 138 .vunmap = mock_dmabuf_vunmap,
diff --git a/drivers/gpu/drm/i915/selftests/mock_drm.c b/drivers/gpu/drm/i915/selftests/mock_drm.c
index 113dec05c7dc..09c704153456 100644
--- a/drivers/gpu/drm/i915/selftests/mock_drm.c
+++ b/drivers/gpu/drm/i915/selftests/mock_drm.c
@@ -24,31 +24,50 @@
24 24
25#include "mock_drm.h" 25#include "mock_drm.h"
26 26
27static inline struct inode fake_inode(struct drm_i915_private *i915)
28{
29 return (struct inode){ .i_rdev = i915->drm.primary->index };
30}
31
32struct drm_file *mock_file(struct drm_i915_private *i915) 27struct drm_file *mock_file(struct drm_i915_private *i915)
33{ 28{
34 struct inode inode = fake_inode(i915); 29 struct file *filp;
35 struct file filp = {}; 30 struct inode *inode;
36 struct drm_file *file; 31 struct drm_file *file;
37 int err; 32 int err;
38 33
39 err = drm_open(&inode, &filp); 34 inode = kzalloc(sizeof(*inode), GFP_KERNEL);
40 if (unlikely(err)) 35 if (!inode) {
41 return ERR_PTR(err); 36 err = -ENOMEM;
37 goto err;
38 }
39
40 inode->i_rdev = i915->drm.primary->index;
42 41
43 file = filp.private_data; 42 filp = kzalloc(sizeof(*filp), GFP_KERNEL);
43 if (!filp) {
44 err = -ENOMEM;
45 goto err_inode;
46 }
47
48 err = drm_open(inode, filp);
49 if (err)
50 goto err_filp;
51
52 file = filp->private_data;
53 memset(&file->filp, POISON_INUSE, sizeof(file->filp));
44 file->authenticated = true; 54 file->authenticated = true;
55
56 kfree(filp);
57 kfree(inode);
45 return file; 58 return file;
59
60err_filp:
61 kfree(filp);
62err_inode:
63 kfree(inode);
64err:
65 return ERR_PTR(err);
46} 66}
47 67
48void mock_file_free(struct drm_i915_private *i915, struct drm_file *file) 68void mock_file_free(struct drm_i915_private *i915, struct drm_file *file)
49{ 69{
50 struct inode inode = fake_inode(i915);
51 struct file filp = { .private_data = file }; 70 struct file filp = { .private_data = file };
52 71
53 drm_release(&inode, &filp); 72 drm_release(NULL, &filp);
54} 73}
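
The mock now heap-allocates the fake inode and file, presumably to keep the large struct inode off the stack, and unwinds with the usual goto ladder. A generic sketch of that unwind shape (names are illustrative):

#include <linux/slab.h>

static int toy_alloc_pair(void **a_out, void **b_out)
{
	void *a, *b;
	int err;

	a = kzalloc(64, GFP_KERNEL);
	if (!a) {
		err = -ENOMEM;
		goto err;
	}

	b = kzalloc(64, GFP_KERNEL);
	if (!b) {
		err = -ENOMEM;
		goto err_a;	/* unwind only what was taken so far */
	}

	*a_out = a;
	*b_out = b;
	return 0;

err_a:
	kfree(a);
err:
	return err;
}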
diff --git a/drivers/gpu/drm/i915/selftests/mock_engine.c b/drivers/gpu/drm/i915/selftests/mock_engine.c
index 8d5ba037064c..0ad624a1db90 100644
--- a/drivers/gpu/drm/i915/selftests/mock_engine.c
+++ b/drivers/gpu/drm/i915/selftests/mock_engine.c
@@ -118,7 +118,6 @@ static struct intel_ring *mock_ring(struct intel_engine_cs *engine)
118 ring->vaddr = (void *)(ring + 1); 118 ring->vaddr = (void *)(ring + 1);
119 119
120 INIT_LIST_HEAD(&ring->request_list); 120 INIT_LIST_HEAD(&ring->request_list);
121 ring->last_retired_head = -1;
122 intel_ring_update_space(ring); 121 intel_ring_update_space(ring);
123 122
124 return ring; 123 return ring;
diff --git a/drivers/gpu/drm/i915/selftests/mock_request.c b/drivers/gpu/drm/i915/selftests/mock_request.c
index 0e8d2e7f8c70..8097e3693ec4 100644
--- a/drivers/gpu/drm/i915/selftests/mock_request.c
+++ b/drivers/gpu/drm/i915/selftests/mock_request.c
@@ -35,7 +35,7 @@ mock_request(struct intel_engine_cs *engine,
35 35
36 /* NB the i915->requests slab cache is enlarged to fit mock_request */ 36 /* NB the i915->requests slab cache is enlarged to fit mock_request */
37 request = i915_gem_request_alloc(engine, context); 37 request = i915_gem_request_alloc(engine, context);
38 if (!request) 38 if (IS_ERR(request))
39 return NULL; 39 return NULL;
40 40
41 mock = container_of(request, typeof(*mock), base); 41 mock = container_of(request, typeof(*mock), base);
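
This one-liner matters because i915_gem_request_alloc() reports failure through ERR_PTR(), so a NULL test can never fire. A minimal sketch of the kernel idiom:

#include <linux/err.h>

struct toy_req;

/* Failure travels inside the pointer itself, not as NULL. */
static struct toy_req *toy_req_alloc(void)
{
	return ERR_PTR(-ENOMEM);
}

static int toy_req_user(void)
{
	struct toy_req *rq = toy_req_alloc();

	if (IS_ERR(rq))		/* 'if (!rq)' would miss this entirely */
		return PTR_ERR(rq);
	/* ... use rq ... */
	return 0;
}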
diff --git a/drivers/gpu/drm/i915/selftests/scatterlist.c b/drivers/gpu/drm/i915/selftests/scatterlist.c
index eb2cda8e2b9f..1cc5d2931753 100644
--- a/drivers/gpu/drm/i915/selftests/scatterlist.c
+++ b/drivers/gpu/drm/i915/selftests/scatterlist.c
@@ -189,6 +189,13 @@ static unsigned int random(unsigned long n,
189 return 1 + (prandom_u32_state(rnd) % 1024); 189 return 1 + (prandom_u32_state(rnd) % 1024);
190} 190}
191 191
192static inline bool page_contiguous(struct page *first,
193 struct page *last,
194 unsigned long npages)
195{
196 return first + npages == last;
197}
198
192static int alloc_table(struct pfn_table *pt, 199static int alloc_table(struct pfn_table *pt,
193 unsigned long count, unsigned long max, 200 unsigned long count, unsigned long max,
194 npages_fn_t npages_fn, 201 npages_fn_t npages_fn,
@@ -216,7 +223,9 @@ static int alloc_table(struct pfn_table *pt,
216 unsigned long npages = npages_fn(n, count, rnd); 223 unsigned long npages = npages_fn(n, count, rnd);
217 224
218 /* Nobody expects the Sparse Memmap! */ 225 /* Nobody expects the Sparse Memmap! */
219 if (pfn_to_page(pfn + npages) != pfn_to_page(pfn) + npages) { 226 if (!page_contiguous(pfn_to_page(pfn),
227 pfn_to_page(pfn + npages),
228 npages)) {
220 sg_free_table(&pt->st); 229 sg_free_table(&pt->st);
221 return -ENOSPC; 230 return -ENOSPC;
222 } 231 }
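
For readers wondering why the test can bail with -ENOSPC: with a sparse memmap the struct page arrays are allocated per memory section, so pointer arithmetic on struct page only tracks pfn arithmetic within one section. An open-coded restatement of the helper's check:

#include <linux/mm.h>

/* True only when the struct page pointers for [pfn, pfn + npages] live in
 * one contiguous memmap chunk; across a section boundary this fails even
 * though the pfns themselves are consecutive. */
static bool toy_span_contiguous(unsigned long pfn, unsigned long npages)
{
	return pfn_to_page(pfn) + npages == pfn_to_page(pfn + npages);
}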
diff --git a/drivers/gpu/drm/imx/Kconfig b/drivers/gpu/drm/imx/Kconfig
index f2c9ae822149..c9e439c82241 100644
--- a/drivers/gpu/drm/imx/Kconfig
+++ b/drivers/gpu/drm/imx/Kconfig
@@ -31,13 +31,6 @@ config DRM_IMX_LDB
31 Choose this to enable the internal LVDS Display Bridge (LDB) 31 Choose this to enable the internal LVDS Display Bridge (LDB)
32 found on i.MX53 and i.MX6 processors. 32 found on i.MX53 and i.MX6 processors.
33 33
34config DRM_IMX_IPUV3
35 tristate
36 depends on DRM_IMX
37 depends on IMX_IPUV3_CORE
38 default y if DRM_IMX=y
39 default m if DRM_IMX=m
40
41config DRM_IMX_HDMI 34config DRM_IMX_HDMI
42 tristate "Freescale i.MX DRM HDMI" 35 tristate "Freescale i.MX DRM HDMI"
43 select DRM_DW_HDMI 36 select DRM_DW_HDMI
diff --git a/drivers/gpu/drm/imx/Makefile b/drivers/gpu/drm/imx/Makefile
index f3ecd8903d97..16ecef33e008 100644
--- a/drivers/gpu/drm/imx/Makefile
+++ b/drivers/gpu/drm/imx/Makefile
@@ -1,5 +1,5 @@
1 1
2imxdrm-objs := imx-drm-core.o 2imxdrm-objs := imx-drm-core.o ipuv3-crtc.o ipuv3-plane.o
3 3
4obj-$(CONFIG_DRM_IMX) += imxdrm.o 4obj-$(CONFIG_DRM_IMX) += imxdrm.o
5 5
@@ -7,6 +7,5 @@ obj-$(CONFIG_DRM_IMX_PARALLEL_DISPLAY) += parallel-display.o
7obj-$(CONFIG_DRM_IMX_TVE) += imx-tve.o 7obj-$(CONFIG_DRM_IMX_TVE) += imx-tve.o
8obj-$(CONFIG_DRM_IMX_LDB) += imx-ldb.o 8obj-$(CONFIG_DRM_IMX_LDB) += imx-ldb.o
9 9
10imx-ipuv3-crtc-objs := ipuv3-crtc.o ipuv3-plane.o
11obj-$(CONFIG_DRM_IMX_IPUV3) += imx-ipuv3-crtc.o 10obj-$(CONFIG_DRM_IMX_IPUV3) += imx-ipuv3-crtc.o
12obj-$(CONFIG_DRM_IMX_HDMI) += dw_hdmi-imx.o 11obj-$(CONFIG_DRM_IMX_HDMI) += dw_hdmi-imx.o
diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c
index 1888bf3920fc..50add2f9e250 100644
--- a/drivers/gpu/drm/imx/imx-drm-core.c
+++ b/drivers/gpu/drm/imx/imx-drm-core.c
@@ -422,7 +422,23 @@ static struct platform_driver imx_drm_pdrv = {
422 .of_match_table = imx_drm_dt_ids, 422 .of_match_table = imx_drm_dt_ids,
423 }, 423 },
424}; 424};
425module_platform_driver(imx_drm_pdrv); 425
426static struct platform_driver * const drivers[] = {
427 &imx_drm_pdrv,
428 &ipu_drm_driver,
429};
430
431static int __init imx_drm_init(void)
432{
433 return platform_register_drivers(drivers, ARRAY_SIZE(drivers));
434}
435module_init(imx_drm_init);
436
437static void __exit imx_drm_exit(void)
438{
439 platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
440}
441module_exit(imx_drm_exit);
426 442
427MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>"); 443MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");
428MODULE_DESCRIPTION("i.MX drm driver core"); 444MODULE_DESCRIPTION("i.MX drm driver core");
diff --git a/drivers/gpu/drm/imx/imx-drm.h b/drivers/gpu/drm/imx/imx-drm.h
index 295434b199db..f6dd64be9cd5 100644
--- a/drivers/gpu/drm/imx/imx-drm.h
+++ b/drivers/gpu/drm/imx/imx-drm.h
@@ -29,6 +29,8 @@ int imx_drm_init_drm(struct platform_device *pdev,
29 int preferred_bpp); 29 int preferred_bpp);
30int imx_drm_exit_drm(void); 30int imx_drm_exit_drm(void);
31 31
32extern struct platform_driver ipu_drm_driver;
33
32void imx_drm_mode_config_init(struct drm_device *drm); 34void imx_drm_mode_config_init(struct drm_device *drm);
33 35
34struct drm_gem_cma_object *imx_drm_fb_get_obj(struct drm_framebuffer *fb); 36struct drm_gem_cma_object *imx_drm_fb_get_obj(struct drm_framebuffer *fb);
diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c
index 88cd11d30134..8fb801fab039 100644
--- a/drivers/gpu/drm/imx/imx-ldb.c
+++ b/drivers/gpu/drm/imx/imx-ldb.c
@@ -647,7 +647,6 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
647 647
648 for_each_child_of_node(np, child) { 648 for_each_child_of_node(np, child) {
649 struct imx_ldb_channel *channel; 649 struct imx_ldb_channel *channel;
650 struct device_node *ep;
651 int bus_format; 650 int bus_format;
652 651
653 ret = of_property_read_u32(child, "reg", &i); 652 ret = of_property_read_u32(child, "reg", &i);
@@ -671,27 +670,11 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
671 * The output port is port@4 with an external 4-port mux or 670 * The output port is port@4 with an external 4-port mux or
672 * port@2 with the internal 2-port mux. 671 * port@2 with the internal 2-port mux.
673 */ 672 */
674 ep = of_graph_get_endpoint_by_regs(child, 673 ret = drm_of_find_panel_or_bridge(child,
675 imx_ldb->lvds_mux ? 4 : 2, 674 imx_ldb->lvds_mux ? 4 : 2, 0,
676 -1); 675 &channel->panel, &channel->bridge);
677 if (ep) { 676 if (ret)
678 struct device_node *remote; 677 return ret;
679
680 remote = of_graph_get_remote_port_parent(ep);
681 of_node_put(ep);
682 if (remote) {
683 channel->panel = of_drm_find_panel(remote);
684 channel->bridge = of_drm_find_bridge(remote);
685 } else
686 return -EPROBE_DEFER;
687 of_node_put(remote);
688
689 if (!channel->panel && !channel->bridge) {
690 dev_err(dev, "panel/bridge not found: %s\n",
691 remote->full_name);
692 return -EPROBE_DEFER;
693 }
694 }
695 678
696 /* panel ddc only if there is no bridge */ 679 /* panel ddc only if there is no bridge */
697 if (!channel->bridge) { 680 if (!channel->bridge) {
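
drm_of_find_panel_or_bridge() collapses the old endpoint/remote-parent walk, including the probe-deferral handling. A minimal sketch of a bind path using it (the port and endpoint numbers here are illustrative):

#include <linux/of.h>
#include <drm/drm_of.h>

/* Resolve whatever sits on output port 2, endpoint 0 of 'np'. The helper
 * itself returns -EPROBE_DEFER while the remote panel/bridge has not
 * registered yet, so callers can simply propagate the error. */
static int toy_find_sink(struct device_node *np,
			 struct drm_panel **panel,
			 struct drm_bridge **bridge)
{
	return drm_of_find_panel_or_bridge(np, 2, 0, panel, bridge);
}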
diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c
index dab9d50ffd8c..5456c15d962c 100644
--- a/drivers/gpu/drm/imx/ipuv3-crtc.c
+++ b/drivers/gpu/drm/imx/ipuv3-crtc.c
@@ -465,16 +465,10 @@ static int ipu_drm_remove(struct platform_device *pdev)
465 return 0; 465 return 0;
466} 466}
467 467
468static struct platform_driver ipu_drm_driver = { 468struct platform_driver ipu_drm_driver = {
469 .driver = { 469 .driver = {
470 .name = "imx-ipuv3-crtc", 470 .name = "imx-ipuv3-crtc",
471 }, 471 },
472 .probe = ipu_drm_probe, 472 .probe = ipu_drm_probe,
473 .remove = ipu_drm_remove, 473 .remove = ipu_drm_remove,
474}; 474};
475module_platform_driver(ipu_drm_driver);
476
477MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");
478MODULE_DESCRIPTION(DRIVER_DESC);
479MODULE_LICENSE("GPL");
480MODULE_ALIAS("platform:imx-ipuv3-crtc");
diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c
index d5c06fd89f90..636031a30e17 100644
--- a/drivers/gpu/drm/imx/parallel-display.c
+++ b/drivers/gpu/drm/imx/parallel-display.c
@@ -19,10 +19,10 @@
19#include <drm/drm_atomic_helper.h> 19#include <drm/drm_atomic_helper.h>
20#include <drm/drm_fb_helper.h> 20#include <drm/drm_fb_helper.h>
21#include <drm/drm_crtc_helper.h> 21#include <drm/drm_crtc_helper.h>
22#include <drm/drm_of.h>
22#include <drm/drm_panel.h> 23#include <drm/drm_panel.h>
23#include <linux/videodev2.h> 24#include <linux/videodev2.h>
24#include <video/of_display_timing.h> 25#include <video/of_display_timing.h>
25#include <linux/of_graph.h>
26 26
27#include "imx-drm.h" 27#include "imx-drm.h"
28 28
@@ -208,7 +208,6 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data)
208{ 208{
209 struct drm_device *drm = data; 209 struct drm_device *drm = data;
210 struct device_node *np = dev->of_node; 210 struct device_node *np = dev->of_node;
211 struct device_node *ep;
212 const u8 *edidp; 211 const u8 *edidp;
213 struct imx_parallel_display *imxpd; 212 struct imx_parallel_display *imxpd;
214 int ret; 213 int ret;
@@ -237,36 +236,9 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data)
237 imxpd->bus_format = bus_format; 236 imxpd->bus_format = bus_format;
238 237
239 /* port@1 is the output port */ 238 /* port@1 is the output port */
240 ep = of_graph_get_endpoint_by_regs(np, 1, -1); 239 ret = drm_of_find_panel_or_bridge(np, 1, 0, &imxpd->panel, &imxpd->bridge);
241 if (ep) { 240 if (ret)
242 struct device_node *remote; 241 return ret;
243
244 remote = of_graph_get_remote_port_parent(ep);
245 if (!remote) {
246 dev_warn(dev, "endpoint %s not connected\n",
247 ep->full_name);
248 of_node_put(ep);
249 return -ENODEV;
250 }
251 of_node_put(ep);
252
253 imxpd->panel = of_drm_find_panel(remote);
254 if (imxpd->panel) {
255 dev_dbg(dev, "found panel %s\n", remote->full_name);
256 } else {
257 imxpd->bridge = of_drm_find_bridge(remote);
258 if (imxpd->bridge)
259 dev_dbg(dev, "found bridge %s\n",
260 remote->full_name);
261 }
262 if (!imxpd->panel && !imxpd->bridge) {
263 dev_dbg(dev, "waiting for panel or bridge %s\n",
264 remote->full_name);
265 of_node_put(remote);
266 return -EPROBE_DEFER;
267 }
268 of_node_put(remote);
269 }
270 242
271 imxpd->dev = dev; 243 imxpd->dev = dev;
272 244
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
index c70310206ac5..a14d7d64d7b1 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
@@ -35,18 +35,28 @@
35#define DISP_REG_OVL_PITCH(n) (0x0044 + 0x20 * (n)) 35#define DISP_REG_OVL_PITCH(n) (0x0044 + 0x20 * (n))
36#define DISP_REG_OVL_RDMA_CTRL(n) (0x00c0 + 0x20 * (n)) 36#define DISP_REG_OVL_RDMA_CTRL(n) (0x00c0 + 0x20 * (n))
37#define DISP_REG_OVL_RDMA_GMC(n) (0x00c8 + 0x20 * (n)) 37#define DISP_REG_OVL_RDMA_GMC(n) (0x00c8 + 0x20 * (n))
38#define DISP_REG_OVL_ADDR(n) (0x0f40 + 0x20 * (n)) 38#define DISP_REG_OVL_ADDR_MT2701 0x0040
39#define DISP_REG_OVL_ADDR_MT8173 0x0f40
40#define DISP_REG_OVL_ADDR(ovl, n) ((ovl)->data->addr + 0x20 * (n))
39 41
40#define OVL_RDMA_MEM_GMC 0x40402020 42#define OVL_RDMA_MEM_GMC 0x40402020
41 43
42#define OVL_CON_BYTE_SWAP BIT(24) 44#define OVL_CON_BYTE_SWAP BIT(24)
43#define OVL_CON_CLRFMT_RGB565 (0 << 12) 45#define OVL_CON_CLRFMT_RGB (1 << 12)
44#define OVL_CON_CLRFMT_RGB888 (1 << 12)
45#define OVL_CON_CLRFMT_RGBA8888 (2 << 12) 46#define OVL_CON_CLRFMT_RGBA8888 (2 << 12)
46#define OVL_CON_CLRFMT_ARGB8888 (3 << 12) 47#define OVL_CON_CLRFMT_ARGB8888 (3 << 12)
48#define OVL_CON_CLRFMT_RGB565(ovl) ((ovl)->data->fmt_rgb565_is_0 ? \
49 0 : OVL_CON_CLRFMT_RGB)
50#define OVL_CON_CLRFMT_RGB888(ovl) ((ovl)->data->fmt_rgb565_is_0 ? \
51 OVL_CON_CLRFMT_RGB : 0)
47#define OVL_CON_AEN BIT(8) 52#define OVL_CON_AEN BIT(8)
48#define OVL_CON_ALPHA 0xff 53#define OVL_CON_ALPHA 0xff
49 54
55struct mtk_disp_ovl_data {
56 unsigned int addr;
57 bool fmt_rgb565_is_0;
58};
59
50/** 60/**
51 * struct mtk_disp_ovl - DISP_OVL driver structure 61 * struct mtk_disp_ovl - DISP_OVL driver structure
52 * @ddp_comp - structure containing type enum and hardware resources 62 * @ddp_comp - structure containing type enum and hardware resources
@@ -55,8 +65,14 @@
55struct mtk_disp_ovl { 65struct mtk_disp_ovl {
56 struct mtk_ddp_comp ddp_comp; 66 struct mtk_ddp_comp ddp_comp;
57 struct drm_crtc *crtc; 67 struct drm_crtc *crtc;
68 const struct mtk_disp_ovl_data *data;
58}; 69};
59 70
71static inline struct mtk_disp_ovl *comp_to_ovl(struct mtk_ddp_comp *comp)
72{
73 return container_of(comp, struct mtk_disp_ovl, ddp_comp);
74}
75
60static irqreturn_t mtk_disp_ovl_irq_handler(int irq, void *dev_id) 76static irqreturn_t mtk_disp_ovl_irq_handler(int irq, void *dev_id)
61{ 77{
62 struct mtk_disp_ovl *priv = dev_id; 78 struct mtk_disp_ovl *priv = dev_id;
@@ -76,20 +92,18 @@ static irqreturn_t mtk_disp_ovl_irq_handler(int irq, void *dev_id)
76static void mtk_ovl_enable_vblank(struct mtk_ddp_comp *comp, 92static void mtk_ovl_enable_vblank(struct mtk_ddp_comp *comp,
77 struct drm_crtc *crtc) 93 struct drm_crtc *crtc)
78{ 94{
79 struct mtk_disp_ovl *priv = container_of(comp, struct mtk_disp_ovl, 95 struct mtk_disp_ovl *ovl = comp_to_ovl(comp);
80 ddp_comp);
81 96
82 priv->crtc = crtc; 97 ovl->crtc = crtc;
83 writel(0x0, comp->regs + DISP_REG_OVL_INTSTA); 98 writel(0x0, comp->regs + DISP_REG_OVL_INTSTA);
84 writel_relaxed(OVL_FME_CPL_INT, comp->regs + DISP_REG_OVL_INTEN); 99 writel_relaxed(OVL_FME_CPL_INT, comp->regs + DISP_REG_OVL_INTEN);
85} 100}
86 101
87static void mtk_ovl_disable_vblank(struct mtk_ddp_comp *comp) 102static void mtk_ovl_disable_vblank(struct mtk_ddp_comp *comp)
88{ 103{
89 struct mtk_disp_ovl *priv = container_of(comp, struct mtk_disp_ovl, 104 struct mtk_disp_ovl *ovl = comp_to_ovl(comp);
90 ddp_comp);
91 105
92 priv->crtc = NULL; 106 ovl->crtc = NULL;
93 writel_relaxed(0x0, comp->regs + DISP_REG_OVL_INTEN); 107 writel_relaxed(0x0, comp->regs + DISP_REG_OVL_INTEN);
94} 108}
95 109
@@ -138,18 +152,18 @@ static void mtk_ovl_layer_off(struct mtk_ddp_comp *comp, unsigned int idx)
138 writel(0x0, comp->regs + DISP_REG_OVL_RDMA_CTRL(idx)); 152 writel(0x0, comp->regs + DISP_REG_OVL_RDMA_CTRL(idx));
139} 153}
140 154
141static unsigned int ovl_fmt_convert(unsigned int fmt) 155static unsigned int ovl_fmt_convert(struct mtk_disp_ovl *ovl, unsigned int fmt)
142{ 156{
143 switch (fmt) { 157 switch (fmt) {
144 default: 158 default:
145 case DRM_FORMAT_RGB565: 159 case DRM_FORMAT_RGB565:
146 return OVL_CON_CLRFMT_RGB565; 160 return OVL_CON_CLRFMT_RGB565(ovl);
147 case DRM_FORMAT_BGR565: 161 case DRM_FORMAT_BGR565:
148 return OVL_CON_CLRFMT_RGB565 | OVL_CON_BYTE_SWAP; 162 return OVL_CON_CLRFMT_RGB565(ovl) | OVL_CON_BYTE_SWAP;
149 case DRM_FORMAT_RGB888: 163 case DRM_FORMAT_RGB888:
150 return OVL_CON_CLRFMT_RGB888; 164 return OVL_CON_CLRFMT_RGB888(ovl);
151 case DRM_FORMAT_BGR888: 165 case DRM_FORMAT_BGR888:
152 return OVL_CON_CLRFMT_RGB888 | OVL_CON_BYTE_SWAP; 166 return OVL_CON_CLRFMT_RGB888(ovl) | OVL_CON_BYTE_SWAP;
153 case DRM_FORMAT_RGBX8888: 167 case DRM_FORMAT_RGBX8888:
154 case DRM_FORMAT_RGBA8888: 168 case DRM_FORMAT_RGBA8888:
155 return OVL_CON_CLRFMT_ARGB8888; 169 return OVL_CON_CLRFMT_ARGB8888;
@@ -168,6 +182,7 @@ static unsigned int ovl_fmt_convert(unsigned int fmt)
168static void mtk_ovl_layer_config(struct mtk_ddp_comp *comp, unsigned int idx, 182static void mtk_ovl_layer_config(struct mtk_ddp_comp *comp, unsigned int idx,
169 struct mtk_plane_state *state) 183 struct mtk_plane_state *state)
170{ 184{
185 struct mtk_disp_ovl *ovl = comp_to_ovl(comp);
171 struct mtk_plane_pending_state *pending = &state->pending; 186 struct mtk_plane_pending_state *pending = &state->pending;
172 unsigned int addr = pending->addr; 187 unsigned int addr = pending->addr;
173 unsigned int pitch = pending->pitch & 0xffff; 188 unsigned int pitch = pending->pitch & 0xffff;
@@ -179,7 +194,7 @@ static void mtk_ovl_layer_config(struct mtk_ddp_comp *comp, unsigned int idx,
179 if (!pending->enable) 194 if (!pending->enable)
180 mtk_ovl_layer_off(comp, idx); 195 mtk_ovl_layer_off(comp, idx);
181 196
182 con = ovl_fmt_convert(fmt); 197 con = ovl_fmt_convert(ovl, fmt);
183 if (idx != 0) 198 if (idx != 0)
184 con |= OVL_CON_AEN | OVL_CON_ALPHA; 199 con |= OVL_CON_AEN | OVL_CON_ALPHA;
185 200
@@ -187,7 +202,7 @@ static void mtk_ovl_layer_config(struct mtk_ddp_comp *comp, unsigned int idx,
187 writel_relaxed(pitch, comp->regs + DISP_REG_OVL_PITCH(idx)); 202 writel_relaxed(pitch, comp->regs + DISP_REG_OVL_PITCH(idx));
188 writel_relaxed(src_size, comp->regs + DISP_REG_OVL_SRC_SIZE(idx)); 203 writel_relaxed(src_size, comp->regs + DISP_REG_OVL_SRC_SIZE(idx));
189 writel_relaxed(offset, comp->regs + DISP_REG_OVL_OFFSET(idx)); 204 writel_relaxed(offset, comp->regs + DISP_REG_OVL_OFFSET(idx));
190 writel_relaxed(addr, comp->regs + DISP_REG_OVL_ADDR(idx)); 205 writel_relaxed(addr, comp->regs + DISP_REG_OVL_ADDR(ovl, idx));
191 206
192 if (pending->enable) 207 if (pending->enable)
193 mtk_ovl_layer_on(comp, idx); 208 mtk_ovl_layer_on(comp, idx);
@@ -264,6 +279,8 @@ static int mtk_disp_ovl_probe(struct platform_device *pdev)
264 return ret; 279 return ret;
265 } 280 }
266 281
282 priv->data = of_device_get_match_data(dev);
283
267 platform_set_drvdata(pdev, priv); 284 platform_set_drvdata(pdev, priv);
268 285
269 ret = devm_request_irq(dev, irq, mtk_disp_ovl_irq_handler, 286 ret = devm_request_irq(dev, irq, mtk_disp_ovl_irq_handler,
@@ -287,8 +304,21 @@ static int mtk_disp_ovl_remove(struct platform_device *pdev)
287 return 0; 304 return 0;
288} 305}
289 306
307static const struct mtk_disp_ovl_data mt2701_ovl_driver_data = {
308 .addr = DISP_REG_OVL_ADDR_MT2701,
309 .fmt_rgb565_is_0 = false,
310};
311
312static const struct mtk_disp_ovl_data mt8173_ovl_driver_data = {
313 .addr = DISP_REG_OVL_ADDR_MT8173,
314 .fmt_rgb565_is_0 = true,
315};
316
290static const struct of_device_id mtk_disp_ovl_driver_dt_match[] = { 317static const struct of_device_id mtk_disp_ovl_driver_dt_match[] = {
291 { .compatible = "mediatek,mt8173-disp-ovl", }, 318 { .compatible = "mediatek,mt2701-disp-ovl",
319 .data = &mt2701_ovl_driver_data},
320 { .compatible = "mediatek,mt8173-disp-ovl",
321 .data = &mt8173_ovl_driver_data},
292 {}, 322 {},
293}; 323};
294MODULE_DEVICE_TABLE(of, mtk_disp_ovl_driver_dt_match); 324MODULE_DEVICE_TABLE(of, mtk_disp_ovl_driver_dt_match);
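
Both MediaTek hunks follow the same recipe for per-SoC deltas: hang a data struct off each of_device_id entry and fetch it once in probe. A condensed sketch with illustrative names:

#include <linux/of_device.h>

struct toy_chip_data {			/* per-SoC knobs */
	unsigned int addr;
	bool fmt_rgb565_is_0;
};

static const struct toy_chip_data toy_chip_a = { .addr = 0x0040 };
static const struct toy_chip_data toy_chip_b = { .addr = 0x0f40,
						 .fmt_rgb565_is_0 = true };

static const struct of_device_id toy_dt_match[] = {
	{ .compatible = "vendor,chip-a", .data = &toy_chip_a },
	{ .compatible = "vendor,chip-b", .data = &toy_chip_b },
	{ /* sentinel */ },
};

/* In probe(): priv->data = of_device_get_match_data(dev); after that the
 * driver reads priv->data->addr and friends and never compares compatible
 * strings by hand. */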
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
index 0df05f95b916..b68a51376f83 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
@@ -38,6 +38,11 @@
38#define RDMA_FIFO_UNDERFLOW_EN BIT(31) 38#define RDMA_FIFO_UNDERFLOW_EN BIT(31)
39#define RDMA_FIFO_PSEUDO_SIZE(bytes) (((bytes) / 16) << 16) 39#define RDMA_FIFO_PSEUDO_SIZE(bytes) (((bytes) / 16) << 16)
40#define RDMA_OUTPUT_VALID_FIFO_THRESHOLD(bytes) ((bytes) / 16) 40#define RDMA_OUTPUT_VALID_FIFO_THRESHOLD(bytes) ((bytes) / 16)
41#define RDMA_FIFO_SIZE(rdma) ((rdma)->data->fifo_size)
42
43struct mtk_disp_rdma_data {
44 unsigned int fifo_size;
45};
41 46
42/** 47/**
43 * struct mtk_disp_rdma - DISP_RDMA driver structure 48 * struct mtk_disp_rdma - DISP_RDMA driver structure
@@ -47,8 +52,14 @@
47struct mtk_disp_rdma { 52struct mtk_disp_rdma {
48 struct mtk_ddp_comp ddp_comp; 53 struct mtk_ddp_comp ddp_comp;
49 struct drm_crtc *crtc; 54 struct drm_crtc *crtc;
55 const struct mtk_disp_rdma_data *data;
50}; 56};
51 57
58static inline struct mtk_disp_rdma *comp_to_rdma(struct mtk_ddp_comp *comp)
59{
60 return container_of(comp, struct mtk_disp_rdma, ddp_comp);
61}
62
52static irqreturn_t mtk_disp_rdma_irq_handler(int irq, void *dev_id) 63static irqreturn_t mtk_disp_rdma_irq_handler(int irq, void *dev_id)
53{ 64{
54 struct mtk_disp_rdma *priv = dev_id; 65 struct mtk_disp_rdma *priv = dev_id;
@@ -77,20 +88,18 @@ static void rdma_update_bits(struct mtk_ddp_comp *comp, unsigned int reg,
77static void mtk_rdma_enable_vblank(struct mtk_ddp_comp *comp, 88static void mtk_rdma_enable_vblank(struct mtk_ddp_comp *comp,
78 struct drm_crtc *crtc) 89 struct drm_crtc *crtc)
79{ 90{
80 struct mtk_disp_rdma *priv = container_of(comp, struct mtk_disp_rdma, 91 struct mtk_disp_rdma *rdma = comp_to_rdma(comp);
81 ddp_comp);
82 92
83 priv->crtc = crtc; 93 rdma->crtc = crtc;
84 rdma_update_bits(comp, DISP_REG_RDMA_INT_ENABLE, RDMA_FRAME_END_INT, 94 rdma_update_bits(comp, DISP_REG_RDMA_INT_ENABLE, RDMA_FRAME_END_INT,
85 RDMA_FRAME_END_INT); 95 RDMA_FRAME_END_INT);
86} 96}
87 97
88static void mtk_rdma_disable_vblank(struct mtk_ddp_comp *comp) 98static void mtk_rdma_disable_vblank(struct mtk_ddp_comp *comp)
89{ 99{
90 struct mtk_disp_rdma *priv = container_of(comp, struct mtk_disp_rdma, 100 struct mtk_disp_rdma *rdma = comp_to_rdma(comp);
91 ddp_comp);
92 101
93 priv->crtc = NULL; 102 rdma->crtc = NULL;
94 rdma_update_bits(comp, DISP_REG_RDMA_INT_ENABLE, RDMA_FRAME_END_INT, 0); 103 rdma_update_bits(comp, DISP_REG_RDMA_INT_ENABLE, RDMA_FRAME_END_INT, 0);
95} 104}
96 105
@@ -111,6 +120,7 @@ static void mtk_rdma_config(struct mtk_ddp_comp *comp, unsigned int width,
111{ 120{
112 unsigned int threshold; 121 unsigned int threshold;
113 unsigned int reg; 122 unsigned int reg;
123 struct mtk_disp_rdma *rdma = comp_to_rdma(comp);
114 124
115 rdma_update_bits(comp, DISP_REG_RDMA_SIZE_CON_0, 0xfff, width); 125 rdma_update_bits(comp, DISP_REG_RDMA_SIZE_CON_0, 0xfff, width);
116 rdma_update_bits(comp, DISP_REG_RDMA_SIZE_CON_1, 0xfffff, height); 126 rdma_update_bits(comp, DISP_REG_RDMA_SIZE_CON_1, 0xfffff, height);
@@ -123,7 +133,7 @@ static void mtk_rdma_config(struct mtk_ddp_comp *comp, unsigned int width,
123 */ 133 */
124 threshold = width * height * vrefresh * 4 * 7 / 1000000; 134 threshold = width * height * vrefresh * 4 * 7 / 1000000;
125 reg = RDMA_FIFO_UNDERFLOW_EN | 135 reg = RDMA_FIFO_UNDERFLOW_EN |
126 RDMA_FIFO_PSEUDO_SIZE(SZ_8K) | 136 RDMA_FIFO_PSEUDO_SIZE(RDMA_FIFO_SIZE(rdma)) |
127 RDMA_OUTPUT_VALID_FIFO_THRESHOLD(threshold); 137 RDMA_OUTPUT_VALID_FIFO_THRESHOLD(threshold);
128 writel(reg, comp->regs + DISP_REG_RDMA_FIFO_CON); 138 writel(reg, comp->regs + DISP_REG_RDMA_FIFO_CON);
129} 139}
@@ -208,6 +218,8 @@ static int mtk_disp_rdma_probe(struct platform_device *pdev)
208 return ret; 218 return ret;
209 } 219 }
210 220
221 priv->data = of_device_get_match_data(dev);
222
211 platform_set_drvdata(pdev, priv); 223 platform_set_drvdata(pdev, priv);
212 224
213 ret = component_add(dev, &mtk_disp_rdma_component_ops); 225 ret = component_add(dev, &mtk_disp_rdma_component_ops);
@@ -224,8 +236,19 @@ static int mtk_disp_rdma_remove(struct platform_device *pdev)
224 return 0; 236 return 0;
225} 237}
226 238
239static const struct mtk_disp_rdma_data mt2701_rdma_driver_data = {
240 .fifo_size = SZ_4K,
241};
242
243static const struct mtk_disp_rdma_data mt8173_rdma_driver_data = {
244 .fifo_size = SZ_8K,
245};
246
227static const struct of_device_id mtk_disp_rdma_driver_dt_match[] = { 247static const struct of_device_id mtk_disp_rdma_driver_dt_match[] = {
228 { .compatible = "mediatek,mt8173-disp-rdma", }, 248 { .compatible = "mediatek,mt2701-disp-rdma",
249 .data = &mt2701_rdma_driver_data},
250 { .compatible = "mediatek,mt8173-disp-rdma",
251 .data = &mt8173_rdma_driver_data},
229 {}, 252 {},
230}; 253};
231MODULE_DEVICE_TABLE(of, mtk_disp_rdma_driver_dt_match); 254MODULE_DEVICE_TABLE(of, mtk_disp_rdma_driver_dt_match);
diff --git a/drivers/gpu/drm/mediatek/mtk_dpi.c b/drivers/gpu/drm/mediatek/mtk_dpi.c
index 3bd3bd688d1a..32ca351ecd09 100644
--- a/drivers/gpu/drm/mediatek/mtk_dpi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dpi.c
@@ -661,7 +661,7 @@ static int mtk_dpi_probe(struct platform_device *pdev)
661 struct device *dev = &pdev->dev; 661 struct device *dev = &pdev->dev;
662 struct mtk_dpi *dpi; 662 struct mtk_dpi *dpi;
663 struct resource *mem; 663 struct resource *mem;
664 struct device_node *ep, *bridge_node = NULL; 664 struct device_node *bridge_node;
665 int comp_id; 665 int comp_id;
666 int ret; 666 int ret;
667 667
@@ -706,15 +706,9 @@ static int mtk_dpi_probe(struct platform_device *pdev)
706 return -EINVAL; 706 return -EINVAL;
707 } 707 }
708 708
709 ep = of_graph_get_next_endpoint(dev->of_node, NULL); 709 bridge_node = of_graph_get_remote_node(dev->of_node, 0, 0);
710 if (ep) { 710 if (!bridge_node)
711 bridge_node = of_graph_get_remote_port_parent(ep);
712 of_node_put(ep);
713 }
714 if (!bridge_node) {
715 dev_err(dev, "Failed to find bridge node\n");
716 return -ENODEV; 711 return -ENODEV;
717 }
718 712
719 dev_info(dev, "Found bridge node: %s\n", bridge_node->full_name); 713 dev_info(dev, "Found bridge node: %s\n", bridge_node->full_name);
720 714
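
of_graph_get_remote_node() is the one-call replacement for the endpoint lookup, remote-parent walk and intermediate of_node_put() calls deleted above. A minimal sketch:

#include <linux/of_graph.h>

/* Fetch the device node on the far side of port 0, endpoint 0; the
 * caller owns the returned reference and must of_node_put() it when
 * done with the node. */
static struct device_node *toy_find_bridge_node(struct device_node *np)
{
	return of_graph_get_remote_node(np, 0, 0);
}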
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
index 69982f5a6198..6b08774e5501 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
@@ -327,6 +327,42 @@ static void mtk_crtc_ddp_hw_fini(struct mtk_drm_crtc *mtk_crtc)
327 pm_runtime_put(drm->dev); 327 pm_runtime_put(drm->dev);
328} 328}
329 329
330static void mtk_crtc_ddp_config(struct drm_crtc *crtc)
331{
332 struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
333 struct mtk_crtc_state *state = to_mtk_crtc_state(mtk_crtc->base.state);
334 struct mtk_ddp_comp *ovl = mtk_crtc->ddp_comp[0];
335 unsigned int i;
336
337 /*
338 * TODO: instead of updating the registers here, we should prepare
339 * working registers in atomic_commit and let the hardware command
340 * queue update module registers on vblank.
341 */
342 if (state->pending_config) {
343 mtk_ddp_comp_config(ovl, state->pending_width,
344 state->pending_height,
345 state->pending_vrefresh, 0);
346
347 state->pending_config = false;
348 }
349
350 if (mtk_crtc->pending_planes) {
351 for (i = 0; i < OVL_LAYER_NR; i++) {
352 struct drm_plane *plane = &mtk_crtc->planes[i];
353 struct mtk_plane_state *plane_state;
354
355 plane_state = to_mtk_plane_state(plane->state);
356
357 if (plane_state->pending.config) {
358 mtk_ddp_comp_layer_config(ovl, i, plane_state);
359 plane_state->pending.config = false;
360 }
361 }
362 mtk_crtc->pending_planes = false;
363 }
364}
365
330static void mtk_drm_crtc_enable(struct drm_crtc *crtc) 366static void mtk_drm_crtc_enable(struct drm_crtc *crtc)
331{ 367{
332 struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); 368 struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
@@ -403,6 +439,7 @@ static void mtk_drm_crtc_atomic_flush(struct drm_crtc *crtc,
403 struct drm_crtc_state *old_crtc_state) 439 struct drm_crtc_state *old_crtc_state)
404{ 440{
405 struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); 441 struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
442 struct mtk_drm_private *priv = crtc->dev->dev_private;
406 unsigned int pending_planes = 0; 443 unsigned int pending_planes = 0;
407 int i; 444 int i;
408 445
@@ -424,6 +461,12 @@ static void mtk_drm_crtc_atomic_flush(struct drm_crtc *crtc,
424 if (crtc->state->color_mgmt_changed) 461 if (crtc->state->color_mgmt_changed)
425 for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) 462 for (i = 0; i < mtk_crtc->ddp_comp_nr; i++)
426 mtk_ddp_gamma_set(mtk_crtc->ddp_comp[i], crtc->state); 463 mtk_ddp_gamma_set(mtk_crtc->ddp_comp[i], crtc->state);
464
465 if (priv->data->shadow_register) {
466 mtk_disp_mutex_acquire(mtk_crtc->mutex);
467 mtk_crtc_ddp_config(crtc);
468 mtk_disp_mutex_release(mtk_crtc->mutex);
469 }
427} 470}
428 471
429static const struct drm_crtc_funcs mtk_crtc_funcs = { 472static const struct drm_crtc_funcs mtk_crtc_funcs = {
@@ -471,36 +514,10 @@ err_cleanup_crtc:
471void mtk_crtc_ddp_irq(struct drm_crtc *crtc, struct mtk_ddp_comp *ovl) 514void mtk_crtc_ddp_irq(struct drm_crtc *crtc, struct mtk_ddp_comp *ovl)
472{ 515{
473 struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); 516 struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
474 struct mtk_crtc_state *state = to_mtk_crtc_state(mtk_crtc->base.state); 517 struct mtk_drm_private *priv = crtc->dev->dev_private;
475 unsigned int i;
476 518
477 /* 519 if (!priv->data->shadow_register)
478 * TODO: instead of updating the registers here, we should prepare 520 mtk_crtc_ddp_config(crtc);
479 * working registers in atomic_commit and let the hardware command
480 * queue update module registers on vblank.
481 */
482 if (state->pending_config) {
483 mtk_ddp_comp_config(ovl, state->pending_width,
484 state->pending_height,
485 state->pending_vrefresh, 0);
486
487 state->pending_config = false;
488 }
489
490 if (mtk_crtc->pending_planes) {
491 for (i = 0; i < OVL_LAYER_NR; i++) {
492 struct drm_plane *plane = &mtk_crtc->planes[i];
493 struct mtk_plane_state *plane_state;
494
495 plane_state = to_mtk_plane_state(plane->state);
496
497 if (plane_state->pending.config) {
498 mtk_ddp_comp_layer_config(ovl, i, plane_state);
499 plane_state->pending.config = false;
500 }
501 }
502 mtk_crtc->pending_planes = false;
503 }
504 521
505 mtk_drm_finish_page_flip(mtk_crtc); 522 mtk_drm_finish_page_flip(mtk_crtc);
506} 523}
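Note: after this change mtk_crtc_ddp_config() runs in one of two places depending on the SoC. Condensed decision logic, as a sketch built from the names in the hunks above:

	/* sketch: where pending CRTC/plane state gets committed */
	if (priv->data->shadow_register) {
		/* writes land in shadow registers; HW applies them on SOF */
		mtk_disp_mutex_acquire(mtk_crtc->mutex);
		mtk_crtc_ddp_config(crtc);
		mtk_disp_mutex_release(mtk_crtc->mutex);
	} else {
		/* no shadow registers: mtk_crtc_ddp_irq() commits on vblank */
	}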
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp.c
index 17ba9355a49c..8130f3dab661 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp.c
@@ -12,6 +12,7 @@
12 */ 12 */
13 13
14#include <linux/clk.h> 14#include <linux/clk.h>
15#include <linux/iopoll.h>
15#include <linux/module.h> 16#include <linux/module.h>
16#include <linux/of_device.h> 17#include <linux/of_device.h>
17#include <linux/platform_device.h> 18#include <linux/platform_device.h>
@@ -31,26 +32,40 @@
31#define DISP_REG_CONFIG_DISP_RDMA1_MOUT_EN 0x0c8 32#define DISP_REG_CONFIG_DISP_RDMA1_MOUT_EN 0x0c8
32#define DISP_REG_CONFIG_MMSYS_CG_CON0 0x100 33#define DISP_REG_CONFIG_MMSYS_CG_CON0 0x100
33 34
35#define DISP_REG_CONFIG_DISP_OVL_MOUT_EN 0x030
36#define DISP_REG_CONFIG_OUT_SEL 0x04c
37#define DISP_REG_CONFIG_DSI_SEL 0x050
38
34#define DISP_REG_MUTEX_EN(n) (0x20 + 0x20 * (n)) 39#define DISP_REG_MUTEX_EN(n) (0x20 + 0x20 * (n))
40#define DISP_REG_MUTEX(n) (0x24 + 0x20 * (n))
35#define DISP_REG_MUTEX_RST(n) (0x28 + 0x20 * (n)) 41#define DISP_REG_MUTEX_RST(n) (0x28 + 0x20 * (n))
36#define DISP_REG_MUTEX_MOD(n) (0x2c + 0x20 * (n)) 42#define DISP_REG_MUTEX_MOD(n) (0x2c + 0x20 * (n))
37#define DISP_REG_MUTEX_SOF(n) (0x30 + 0x20 * (n)) 43#define DISP_REG_MUTEX_SOF(n) (0x30 + 0x20 * (n))
38 44
39#define MUTEX_MOD_DISP_OVL0 BIT(11) 45#define INT_MUTEX BIT(1)
40#define MUTEX_MOD_DISP_OVL1 BIT(12) 46
41#define MUTEX_MOD_DISP_RDMA0 BIT(13) 47#define MT8173_MUTEX_MOD_DISP_OVL0 BIT(11)
42#define MUTEX_MOD_DISP_RDMA1 BIT(14) 48#define MT8173_MUTEX_MOD_DISP_OVL1 BIT(12)
43#define MUTEX_MOD_DISP_RDMA2 BIT(15) 49#define MT8173_MUTEX_MOD_DISP_RDMA0 BIT(13)
44#define MUTEX_MOD_DISP_WDMA0 BIT(16) 50#define MT8173_MUTEX_MOD_DISP_RDMA1 BIT(14)
45#define MUTEX_MOD_DISP_WDMA1 BIT(17) 51#define MT8173_MUTEX_MOD_DISP_RDMA2 BIT(15)
46#define MUTEX_MOD_DISP_COLOR0 BIT(18) 52#define MT8173_MUTEX_MOD_DISP_WDMA0 BIT(16)
47#define MUTEX_MOD_DISP_COLOR1 BIT(19) 53#define MT8173_MUTEX_MOD_DISP_WDMA1 BIT(17)
48#define MUTEX_MOD_DISP_AAL BIT(20) 54#define MT8173_MUTEX_MOD_DISP_COLOR0 BIT(18)
49#define MUTEX_MOD_DISP_GAMMA BIT(21) 55#define MT8173_MUTEX_MOD_DISP_COLOR1 BIT(19)
50#define MUTEX_MOD_DISP_UFOE BIT(22) 56#define MT8173_MUTEX_MOD_DISP_AAL BIT(20)
51#define MUTEX_MOD_DISP_PWM0 BIT(23) 57#define MT8173_MUTEX_MOD_DISP_GAMMA BIT(21)
52#define MUTEX_MOD_DISP_PWM1 BIT(24) 58#define MT8173_MUTEX_MOD_DISP_UFOE BIT(22)
53#define MUTEX_MOD_DISP_OD BIT(25) 59#define MT8173_MUTEX_MOD_DISP_PWM0 BIT(23)
60#define MT8173_MUTEX_MOD_DISP_PWM1 BIT(24)
61#define MT8173_MUTEX_MOD_DISP_OD BIT(25)
62
63#define MT2701_MUTEX_MOD_DISP_OVL BIT(3)
64#define MT2701_MUTEX_MOD_DISP_WDMA BIT(6)
65#define MT2701_MUTEX_MOD_DISP_COLOR BIT(7)
66#define MT2701_MUTEX_MOD_DISP_BLS BIT(9)
67#define MT2701_MUTEX_MOD_DISP_RDMA0 BIT(10)
68#define MT2701_MUTEX_MOD_DISP_RDMA1 BIT(12)
54 69
55#define MUTEX_SOF_SINGLE_MODE 0 70#define MUTEX_SOF_SINGLE_MODE 0
56#define MUTEX_SOF_DSI0 1 71#define MUTEX_SOF_DSI0 1
@@ -67,6 +82,10 @@
67#define DPI0_SEL_IN_RDMA1 0x1 82#define DPI0_SEL_IN_RDMA1 0x1
68#define COLOR1_SEL_IN_OVL1 0x1 83#define COLOR1_SEL_IN_OVL1 0x1
69 84
85#define OVL_MOUT_EN_RDMA 0x1
86#define BLS_TO_DSI_RDMA1_TO_DPI1 0x8
87#define DSI_SEL_IN_BLS 0x0
88
70struct mtk_disp_mutex { 89struct mtk_disp_mutex {
71 int id; 90 int id;
72 bool claimed; 91 bool claimed;
@@ -77,24 +96,34 @@ struct mtk_ddp {
77 struct clk *clk; 96 struct clk *clk;
78 void __iomem *regs; 97 void __iomem *regs;
79 struct mtk_disp_mutex mutex[10]; 98 struct mtk_disp_mutex mutex[10];
99 const unsigned int *mutex_mod;
100};
101
102static const unsigned int mt2701_mutex_mod[DDP_COMPONENT_ID_MAX] = {
103 [DDP_COMPONENT_BLS] = MT2701_MUTEX_MOD_DISP_BLS,
104 [DDP_COMPONENT_COLOR0] = MT2701_MUTEX_MOD_DISP_COLOR,
105 [DDP_COMPONENT_OVL0] = MT2701_MUTEX_MOD_DISP_OVL,
106 [DDP_COMPONENT_RDMA0] = MT2701_MUTEX_MOD_DISP_RDMA0,
107 [DDP_COMPONENT_RDMA1] = MT2701_MUTEX_MOD_DISP_RDMA1,
108 [DDP_COMPONENT_WDMA0] = MT2701_MUTEX_MOD_DISP_WDMA,
80}; 109};
81 110
82static const unsigned int mutex_mod[DDP_COMPONENT_ID_MAX] = { 111static const unsigned int mt8173_mutex_mod[DDP_COMPONENT_ID_MAX] = {
83 [DDP_COMPONENT_AAL] = MUTEX_MOD_DISP_AAL, 112 [DDP_COMPONENT_AAL] = MT8173_MUTEX_MOD_DISP_AAL,
84 [DDP_COMPONENT_COLOR0] = MUTEX_MOD_DISP_COLOR0, 113 [DDP_COMPONENT_COLOR0] = MT8173_MUTEX_MOD_DISP_COLOR0,
85 [DDP_COMPONENT_COLOR1] = MUTEX_MOD_DISP_COLOR1, 114 [DDP_COMPONENT_COLOR1] = MT8173_MUTEX_MOD_DISP_COLOR1,
86 [DDP_COMPONENT_GAMMA] = MUTEX_MOD_DISP_GAMMA, 115 [DDP_COMPONENT_GAMMA] = MT8173_MUTEX_MOD_DISP_GAMMA,
87 [DDP_COMPONENT_OD] = MUTEX_MOD_DISP_OD, 116 [DDP_COMPONENT_OD] = MT8173_MUTEX_MOD_DISP_OD,
88 [DDP_COMPONENT_OVL0] = MUTEX_MOD_DISP_OVL0, 117 [DDP_COMPONENT_OVL0] = MT8173_MUTEX_MOD_DISP_OVL0,
89 [DDP_COMPONENT_OVL1] = MUTEX_MOD_DISP_OVL1, 118 [DDP_COMPONENT_OVL1] = MT8173_MUTEX_MOD_DISP_OVL1,
90 [DDP_COMPONENT_PWM0] = MUTEX_MOD_DISP_PWM0, 119 [DDP_COMPONENT_PWM0] = MT8173_MUTEX_MOD_DISP_PWM0,
91 [DDP_COMPONENT_PWM1] = MUTEX_MOD_DISP_PWM1, 120 [DDP_COMPONENT_PWM1] = MT8173_MUTEX_MOD_DISP_PWM1,
92 [DDP_COMPONENT_RDMA0] = MUTEX_MOD_DISP_RDMA0, 121 [DDP_COMPONENT_RDMA0] = MT8173_MUTEX_MOD_DISP_RDMA0,
93 [DDP_COMPONENT_RDMA1] = MUTEX_MOD_DISP_RDMA1, 122 [DDP_COMPONENT_RDMA1] = MT8173_MUTEX_MOD_DISP_RDMA1,
94 [DDP_COMPONENT_RDMA2] = MUTEX_MOD_DISP_RDMA2, 123 [DDP_COMPONENT_RDMA2] = MT8173_MUTEX_MOD_DISP_RDMA2,
95 [DDP_COMPONENT_UFOE] = MUTEX_MOD_DISP_UFOE, 124 [DDP_COMPONENT_UFOE] = MT8173_MUTEX_MOD_DISP_UFOE,
96 [DDP_COMPONENT_WDMA0] = MUTEX_MOD_DISP_WDMA0, 125 [DDP_COMPONENT_WDMA0] = MT8173_MUTEX_MOD_DISP_WDMA0,
97 [DDP_COMPONENT_WDMA1] = MUTEX_MOD_DISP_WDMA1, 126 [DDP_COMPONENT_WDMA1] = MT8173_MUTEX_MOD_DISP_WDMA1,
98}; 127};
99 128
100static unsigned int mtk_ddp_mout_en(enum mtk_ddp_comp_id cur, 129static unsigned int mtk_ddp_mout_en(enum mtk_ddp_comp_id cur,
@@ -106,6 +135,9 @@ static unsigned int mtk_ddp_mout_en(enum mtk_ddp_comp_id cur,
106 if (cur == DDP_COMPONENT_OVL0 && next == DDP_COMPONENT_COLOR0) { 135 if (cur == DDP_COMPONENT_OVL0 && next == DDP_COMPONENT_COLOR0) {
107 *addr = DISP_REG_CONFIG_DISP_OVL0_MOUT_EN; 136 *addr = DISP_REG_CONFIG_DISP_OVL0_MOUT_EN;
108 value = OVL0_MOUT_EN_COLOR0; 137 value = OVL0_MOUT_EN_COLOR0;
138 } else if (cur == DDP_COMPONENT_OVL0 && next == DDP_COMPONENT_RDMA0) {
139 *addr = DISP_REG_CONFIG_DISP_OVL_MOUT_EN;
140 value = OVL_MOUT_EN_RDMA;
109 } else if (cur == DDP_COMPONENT_OD && next == DDP_COMPONENT_RDMA0) { 141 } else if (cur == DDP_COMPONENT_OD && next == DDP_COMPONENT_RDMA0) {
110 *addr = DISP_REG_CONFIG_DISP_OD_MOUT_EN; 142 *addr = DISP_REG_CONFIG_DISP_OD_MOUT_EN;
111 value = OD_MOUT_EN_RDMA0; 143 value = OD_MOUT_EN_RDMA0;
@@ -143,6 +175,9 @@ static unsigned int mtk_ddp_sel_in(enum mtk_ddp_comp_id cur,
143 } else if (cur == DDP_COMPONENT_OVL1 && next == DDP_COMPONENT_COLOR1) { 175 } else if (cur == DDP_COMPONENT_OVL1 && next == DDP_COMPONENT_COLOR1) {
144 *addr = DISP_REG_CONFIG_DISP_COLOR1_SEL_IN; 176 *addr = DISP_REG_CONFIG_DISP_COLOR1_SEL_IN;
145 value = COLOR1_SEL_IN_OVL1; 177 value = COLOR1_SEL_IN_OVL1;
178 } else if (cur == DDP_COMPONENT_BLS && next == DDP_COMPONENT_DSI0) {
179 *addr = DISP_REG_CONFIG_DSI_SEL;
180 value = DSI_SEL_IN_BLS;
146 } else { 181 } else {
147 value = 0; 182 value = 0;
148 } 183 }
@@ -150,6 +185,15 @@ static unsigned int mtk_ddp_sel_in(enum mtk_ddp_comp_id cur,
150 return value; 185 return value;
151} 186}
152 187
188static void mtk_ddp_sout_sel(void __iomem *config_regs,
189 enum mtk_ddp_comp_id cur,
190 enum mtk_ddp_comp_id next)
191{
192 if (cur == DDP_COMPONENT_BLS && next == DDP_COMPONENT_DSI0)
193 writel_relaxed(BLS_TO_DSI_RDMA1_TO_DPI1,
194 config_regs + DISP_REG_CONFIG_OUT_SEL);
195}
196
153void mtk_ddp_add_comp_to_path(void __iomem *config_regs, 197void mtk_ddp_add_comp_to_path(void __iomem *config_regs,
154 enum mtk_ddp_comp_id cur, 198 enum mtk_ddp_comp_id cur,
155 enum mtk_ddp_comp_id next) 199 enum mtk_ddp_comp_id next)
@@ -162,6 +206,8 @@ void mtk_ddp_add_comp_to_path(void __iomem *config_regs,
162 writel_relaxed(reg, config_regs + addr); 206 writel_relaxed(reg, config_regs + addr);
163 } 207 }
164 208
209 mtk_ddp_sout_sel(config_regs, cur, next);
210
165 value = mtk_ddp_sel_in(cur, next, &addr); 211 value = mtk_ddp_sel_in(cur, next, &addr);
166 if (value) { 212 if (value) {
167 reg = readl_relaxed(config_regs + addr) | value; 213 reg = readl_relaxed(config_regs + addr) | value;
@@ -247,7 +293,7 @@ void mtk_disp_mutex_add_comp(struct mtk_disp_mutex *mutex,
247 break; 293 break;
248 default: 294 default:
249 reg = readl_relaxed(ddp->regs + DISP_REG_MUTEX_MOD(mutex->id)); 295 reg = readl_relaxed(ddp->regs + DISP_REG_MUTEX_MOD(mutex->id));
250 reg |= mutex_mod[id]; 296 reg |= ddp->mutex_mod[id];
251 writel_relaxed(reg, ddp->regs + DISP_REG_MUTEX_MOD(mutex->id)); 297 writel_relaxed(reg, ddp->regs + DISP_REG_MUTEX_MOD(mutex->id));
252 return; 298 return;
253 } 299 }
@@ -273,7 +319,7 @@ void mtk_disp_mutex_remove_comp(struct mtk_disp_mutex *mutex,
273 break; 319 break;
274 default: 320 default:
275 reg = readl_relaxed(ddp->regs + DISP_REG_MUTEX_MOD(mutex->id)); 321 reg = readl_relaxed(ddp->regs + DISP_REG_MUTEX_MOD(mutex->id));
276 reg &= ~mutex_mod[id]; 322 reg &= ~(ddp->mutex_mod[id]);
277 writel_relaxed(reg, ddp->regs + DISP_REG_MUTEX_MOD(mutex->id)); 323 writel_relaxed(reg, ddp->regs + DISP_REG_MUTEX_MOD(mutex->id));
278 break; 324 break;
279 } 325 }
@@ -299,6 +345,27 @@ void mtk_disp_mutex_disable(struct mtk_disp_mutex *mutex)
299 writel(0, ddp->regs + DISP_REG_MUTEX_EN(mutex->id)); 345 writel(0, ddp->regs + DISP_REG_MUTEX_EN(mutex->id));
300} 346}
301 347
348void mtk_disp_mutex_acquire(struct mtk_disp_mutex *mutex)
349{
350 struct mtk_ddp *ddp = container_of(mutex, struct mtk_ddp,
351 mutex[mutex->id]);
352 u32 tmp;
353
354 writel(1, ddp->regs + DISP_REG_MUTEX_EN(mutex->id));
355 writel(1, ddp->regs + DISP_REG_MUTEX(mutex->id));
356 if (readl_poll_timeout_atomic(ddp->regs + DISP_REG_MUTEX(mutex->id),
357 tmp, tmp & INT_MUTEX, 1, 10000))
358 pr_err("could not acquire mutex %d\n", mutex->id);
359}
360
361void mtk_disp_mutex_release(struct mtk_disp_mutex *mutex)
362{
363 struct mtk_ddp *ddp = container_of(mutex, struct mtk_ddp,
364 mutex[mutex->id]);
365
366 writel(0, ddp->regs + DISP_REG_MUTEX(mutex->id));
367}
368
302static int mtk_ddp_probe(struct platform_device *pdev) 369static int mtk_ddp_probe(struct platform_device *pdev)
303{ 370{
304 struct device *dev = &pdev->dev; 371 struct device *dev = &pdev->dev;
@@ -326,6 +393,8 @@ static int mtk_ddp_probe(struct platform_device *pdev)
326 return PTR_ERR(ddp->regs); 393 return PTR_ERR(ddp->regs);
327 } 394 }
328 395
396 ddp->mutex_mod = of_device_get_match_data(dev);
397
329 platform_set_drvdata(pdev, ddp); 398 platform_set_drvdata(pdev, ddp);
330 399
331 return 0; 400 return 0;
@@ -337,7 +406,8 @@ static int mtk_ddp_remove(struct platform_device *pdev)
337} 406}
338 407
339static const struct of_device_id ddp_driver_dt_match[] = { 408static const struct of_device_id ddp_driver_dt_match[] = {
340 { .compatible = "mediatek,mt8173-disp-mutex" }, 409 { .compatible = "mediatek,mt2701-disp-mutex", .data = mt2701_mutex_mod},
410 { .compatible = "mediatek,mt8173-disp-mutex", .data = mt8173_mutex_mod},
341 {}, 411 {},
342}; 412};
343MODULE_DEVICE_TABLE(of, ddp_driver_dt_match); 413MODULE_DEVICE_TABLE(of, ddp_driver_dt_match);
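Note: mtk_disp_mutex_acquire() relies on readl_poll_timeout_atomic() from <linux/iopoll.h>, which busy-polls a register until a condition holds or a timeout expires, returning 0 on success and -ETIMEDOUT otherwise. Expanded sketch of the call above:

	u32 tmp;
	int ret;

	/* re-read DISP_REG_MUTEX every 1 us, give up after 10 ms */
	ret = readl_poll_timeout_atomic(ddp->regs + DISP_REG_MUTEX(mutex->id),
					tmp,		 /* last value read */
					tmp & INT_MUTEX, /* success condition */
					1, 10000);
	if (ret)	/* -ETIMEDOUT */
		pr_err("could not acquire mutex %d\n", mutex->id);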
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp.h b/drivers/gpu/drm/mediatek/mtk_drm_ddp.h
index 92c11752ff65..f9a799168077 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp.h
@@ -37,5 +37,7 @@ void mtk_disp_mutex_remove_comp(struct mtk_disp_mutex *mutex,
37 enum mtk_ddp_comp_id id); 37 enum mtk_ddp_comp_id id);
38void mtk_disp_mutex_unprepare(struct mtk_disp_mutex *mutex); 38void mtk_disp_mutex_unprepare(struct mtk_disp_mutex *mutex);
39void mtk_disp_mutex_put(struct mtk_disp_mutex *mutex); 39void mtk_disp_mutex_put(struct mtk_disp_mutex *mutex);
40void mtk_disp_mutex_acquire(struct mtk_disp_mutex *mutex);
41void mtk_disp_mutex_release(struct mtk_disp_mutex *mutex);
40 42
41#endif /* MTK_DRM_DDP_H */ 43#endif /* MTK_DRM_DDP_H */
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
index 48cc01fd20c7..8b52416b6e41 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
@@ -39,9 +39,11 @@
39#define DISP_REG_UFO_START 0x0000 39#define DISP_REG_UFO_START 0x0000
40 40
41#define DISP_COLOR_CFG_MAIN 0x0400 41#define DISP_COLOR_CFG_MAIN 0x0400
42#define DISP_COLOR_START 0x0c00 42#define DISP_COLOR_START_MT2701 0x0f00
43#define DISP_COLOR_WIDTH 0x0c50 43#define DISP_COLOR_START_MT8173 0x0c00
44#define DISP_COLOR_HEIGHT 0x0c54 44#define DISP_COLOR_START(comp) ((comp)->data->color_offset)
45#define DISP_COLOR_WIDTH(comp) (DISP_COLOR_START(comp) + 0x50)
46#define DISP_COLOR_HEIGHT(comp) (DISP_COLOR_START(comp) + 0x54)
45 47
46#define DISP_AAL_EN 0x0000 48#define DISP_AAL_EN 0x0000
47#define DISP_AAL_SIZE 0x0030 49#define DISP_AAL_SIZE 0x0030
@@ -80,6 +82,20 @@
80#define DITHER_ADD_LSHIFT_G(x) (((x) & 0x7) << 4) 82#define DITHER_ADD_LSHIFT_G(x) (((x) & 0x7) << 4)
81#define DITHER_ADD_RSHIFT_G(x) (((x) & 0x7) << 0) 83#define DITHER_ADD_RSHIFT_G(x) (((x) & 0x7) << 0)
82 84
85struct mtk_disp_color_data {
86 unsigned int color_offset;
87};
88
89struct mtk_disp_color {
90 struct mtk_ddp_comp ddp_comp;
91 const struct mtk_disp_color_data *data;
92};
93
94static inline struct mtk_disp_color *comp_to_color(struct mtk_ddp_comp *comp)
95{
96 return container_of(comp, struct mtk_disp_color, ddp_comp);
97}
98
83void mtk_dither_set(struct mtk_ddp_comp *comp, unsigned int bpc, 99void mtk_dither_set(struct mtk_ddp_comp *comp, unsigned int bpc,
84 unsigned int CFG) 100 unsigned int CFG)
85{ 101{
@@ -107,15 +123,19 @@ static void mtk_color_config(struct mtk_ddp_comp *comp, unsigned int w,
107 unsigned int h, unsigned int vrefresh, 123 unsigned int h, unsigned int vrefresh,
108 unsigned int bpc) 124 unsigned int bpc)
109{ 125{
110 writel(w, comp->regs + DISP_COLOR_WIDTH); 126 struct mtk_disp_color *color = comp_to_color(comp);
111 writel(h, comp->regs + DISP_COLOR_HEIGHT); 127
128 writel(w, comp->regs + DISP_COLOR_WIDTH(color));
129 writel(h, comp->regs + DISP_COLOR_HEIGHT(color));
112} 130}
113 131
114static void mtk_color_start(struct mtk_ddp_comp *comp) 132static void mtk_color_start(struct mtk_ddp_comp *comp)
115{ 133{
134 struct mtk_disp_color *color = comp_to_color(comp);
135
116 writel(COLOR_BYPASS_ALL | COLOR_SEQ_SEL, 136 writel(COLOR_BYPASS_ALL | COLOR_SEQ_SEL,
117 comp->regs + DISP_COLOR_CFG_MAIN); 137 comp->regs + DISP_COLOR_CFG_MAIN);
118 writel(0x1, comp->regs + DISP_COLOR_START); 138 writel(0x1, comp->regs + DISP_COLOR_START(color));
119} 139}
120 140
121static void mtk_od_config(struct mtk_ddp_comp *comp, unsigned int w, 141static void mtk_od_config(struct mtk_ddp_comp *comp, unsigned int w,
@@ -236,6 +256,7 @@ static const char * const mtk_ddp_comp_stem[MTK_DDP_COMP_TYPE_MAX] = {
236 [MTK_DISP_PWM] = "pwm", 256 [MTK_DISP_PWM] = "pwm",
237 [MTK_DISP_MUTEX] = "mutex", 257 [MTK_DISP_MUTEX] = "mutex",
238 [MTK_DISP_OD] = "od", 258 [MTK_DISP_OD] = "od",
259 [MTK_DISP_BLS] = "bls",
239}; 260};
240 261
241struct mtk_ddp_comp_match { 262struct mtk_ddp_comp_match {
@@ -246,6 +267,7 @@ struct mtk_ddp_comp_match {
246 267
247static const struct mtk_ddp_comp_match mtk_ddp_matches[DDP_COMPONENT_ID_MAX] = { 268static const struct mtk_ddp_comp_match mtk_ddp_matches[DDP_COMPONENT_ID_MAX] = {
248 [DDP_COMPONENT_AAL] = { MTK_DISP_AAL, 0, &ddp_aal }, 269 [DDP_COMPONENT_AAL] = { MTK_DISP_AAL, 0, &ddp_aal },
270 [DDP_COMPONENT_BLS] = { MTK_DISP_BLS, 0, NULL },
249 [DDP_COMPONENT_COLOR0] = { MTK_DISP_COLOR, 0, &ddp_color }, 271 [DDP_COMPONENT_COLOR0] = { MTK_DISP_COLOR, 0, &ddp_color },
250 [DDP_COMPONENT_COLOR1] = { MTK_DISP_COLOR, 1, &ddp_color }, 272 [DDP_COMPONENT_COLOR1] = { MTK_DISP_COLOR, 1, &ddp_color },
251 [DDP_COMPONENT_DPI0] = { MTK_DPI, 0, NULL }, 273 [DDP_COMPONENT_DPI0] = { MTK_DPI, 0, NULL },
@@ -264,6 +286,22 @@ static const struct mtk_ddp_comp_match mtk_ddp_matches[DDP_COMPONENT_ID_MAX] = {
264 [DDP_COMPONENT_WDMA1] = { MTK_DISP_WDMA, 1, NULL }, 286 [DDP_COMPONENT_WDMA1] = { MTK_DISP_WDMA, 1, NULL },
265}; 287};
266 288
289static const struct mtk_disp_color_data mt2701_color_driver_data = {
290 .color_offset = DISP_COLOR_START_MT2701,
291};
292
293static const struct mtk_disp_color_data mt8173_color_driver_data = {
294 .color_offset = DISP_COLOR_START_MT8173,
295};
296
297static const struct of_device_id mtk_disp_color_driver_dt_match[] = {
298 { .compatible = "mediatek,mt2701-disp-color",
299 .data = &mt2701_color_driver_data},
300 { .compatible = "mediatek,mt8173-disp-color",
301 .data = &mt8173_color_driver_data},
302 {},
303};
304
267int mtk_ddp_comp_get_id(struct device_node *node, 305int mtk_ddp_comp_get_id(struct device_node *node,
268 enum mtk_ddp_comp_type comp_type) 306 enum mtk_ddp_comp_type comp_type)
269{ 307{
@@ -286,14 +324,29 @@ int mtk_ddp_comp_init(struct device *dev, struct device_node *node,
286 enum mtk_ddp_comp_type type; 324 enum mtk_ddp_comp_type type;
287 struct device_node *larb_node; 325 struct device_node *larb_node;
288 struct platform_device *larb_pdev; 326 struct platform_device *larb_pdev;
327 const struct of_device_id *match;
328 struct mtk_disp_color *color;
289 329
290 if (comp_id < 0 || comp_id >= DDP_COMPONENT_ID_MAX) 330 if (comp_id < 0 || comp_id >= DDP_COMPONENT_ID_MAX)
291 return -EINVAL; 331 return -EINVAL;
292 332
333 type = mtk_ddp_matches[comp_id].type;
334 if (type == MTK_DISP_COLOR) {
335 devm_kfree(dev, comp);
336 color = devm_kzalloc(dev, sizeof(*color), GFP_KERNEL);
337 if (!color)
338 return -ENOMEM;
339
340 match = of_match_node(mtk_disp_color_driver_dt_match, node);
341 color->data = match->data;
342 comp = &color->ddp_comp;
343 }
344
293 comp->id = comp_id; 345 comp->id = comp_id;
294 comp->funcs = funcs ?: mtk_ddp_matches[comp_id].funcs; 346 comp->funcs = funcs ?: mtk_ddp_matches[comp_id].funcs;
295 347
296 if (comp_id == DDP_COMPONENT_DPI0 || 348 if (comp_id == DDP_COMPONENT_BLS ||
349 comp_id == DDP_COMPONENT_DPI0 ||
297 comp_id == DDP_COMPONENT_DSI0 || 350 comp_id == DDP_COMPONENT_DSI0 ||
298 comp_id == DDP_COMPONENT_PWM0) { 351 comp_id == DDP_COMPONENT_PWM0) {
299 comp->regs = NULL; 352 comp->regs = NULL;
@@ -308,8 +361,6 @@ int mtk_ddp_comp_init(struct device *dev, struct device_node *node,
308 if (IS_ERR(comp->clk)) 361 if (IS_ERR(comp->clk))
309 comp->clk = NULL; 362 comp->clk = NULL;
310 363
311 type = mtk_ddp_matches[comp_id].type;
312
313 /* Only DMA capable components need the LARB property */ 364 /* Only DMA capable components need the LARB property */
314 comp->larb_dev = NULL; 365 comp->larb_dev = NULL;
315 if (type != MTK_DISP_OVL && 366 if (type != MTK_DISP_OVL &&
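Note: the COLOR component now carries its per-SoC register base in a wrapper allocated around the generic struct mtk_ddp_comp; container_of() recovers the wrapper from the generic handle inside the callbacks. The shape of the pattern, reduced to a sketch (color_set_size is a hypothetical condensed helper):

	struct mtk_disp_color {
		struct mtk_ddp_comp ddp_comp;		/* must stay embedded */
		const struct mtk_disp_color_data *data;	/* per-SoC offsets */
	};

	static void color_set_size(struct mtk_ddp_comp *comp, u32 w, u32 h)
	{
		struct mtk_disp_color *color =
			container_of(comp, struct mtk_disp_color, ddp_comp);
		u32 base = color->data->color_offset;

		writel(w, comp->regs + base + 0x50);	/* DISP_COLOR_WIDTH */
		writel(h, comp->regs + base + 0x54);	/* DISP_COLOR_HEIGHT */
	}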
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
index 22a33ee451c4..0828cf8bf85c 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
@@ -36,11 +36,13 @@ enum mtk_ddp_comp_type {
36 MTK_DISP_PWM, 36 MTK_DISP_PWM,
37 MTK_DISP_MUTEX, 37 MTK_DISP_MUTEX,
38 MTK_DISP_OD, 38 MTK_DISP_OD,
39 MTK_DISP_BLS,
39 MTK_DDP_COMP_TYPE_MAX, 40 MTK_DDP_COMP_TYPE_MAX,
40}; 41};
41 42
42enum mtk_ddp_comp_id { 43enum mtk_ddp_comp_id {
43 DDP_COMPONENT_AAL, 44 DDP_COMPONENT_AAL,
45 DDP_COMPONENT_BLS,
44 DDP_COMPONENT_COLOR0, 46 DDP_COMPONENT_COLOR0,
45 DDP_COMPONENT_COLOR1, 47 DDP_COMPONENT_COLOR1,
46 DDP_COMPONENT_DPI0, 48 DDP_COMPONENT_DPI0,
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index f5a1fd9b3ecc..f6c8ec4c7dbc 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -128,7 +128,20 @@ static const struct drm_mode_config_funcs mtk_drm_mode_config_funcs = {
128 .atomic_commit = mtk_atomic_commit, 128 .atomic_commit = mtk_atomic_commit,
129}; 129};
130 130
131static const enum mtk_ddp_comp_id mtk_ddp_main[] = { 131static const enum mtk_ddp_comp_id mt2701_mtk_ddp_main[] = {
132 DDP_COMPONENT_OVL0,
133 DDP_COMPONENT_RDMA0,
134 DDP_COMPONENT_COLOR0,
135 DDP_COMPONENT_BLS,
136 DDP_COMPONENT_DSI0,
137};
138
139static const enum mtk_ddp_comp_id mt2701_mtk_ddp_ext[] = {
140 DDP_COMPONENT_RDMA1,
141 DDP_COMPONENT_DPI0,
142};
143
144static const enum mtk_ddp_comp_id mt8173_mtk_ddp_main[] = {
132 DDP_COMPONENT_OVL0, 145 DDP_COMPONENT_OVL0,
133 DDP_COMPONENT_COLOR0, 146 DDP_COMPONENT_COLOR0,
134 DDP_COMPONENT_AAL, 147 DDP_COMPONENT_AAL,
@@ -139,7 +152,7 @@ static const enum mtk_ddp_comp_id mtk_ddp_main[] = {
139 DDP_COMPONENT_PWM0, 152 DDP_COMPONENT_PWM0,
140}; 153};
141 154
142static const enum mtk_ddp_comp_id mtk_ddp_ext[] = { 155static const enum mtk_ddp_comp_id mt8173_mtk_ddp_ext[] = {
143 DDP_COMPONENT_OVL1, 156 DDP_COMPONENT_OVL1,
144 DDP_COMPONENT_COLOR1, 157 DDP_COMPONENT_COLOR1,
145 DDP_COMPONENT_GAMMA, 158 DDP_COMPONENT_GAMMA,
@@ -147,6 +160,21 @@ static const enum mtk_ddp_comp_id mtk_ddp_ext[] = {
147 DDP_COMPONENT_DPI0, 160 DDP_COMPONENT_DPI0,
148}; 161};
149 162
163static const struct mtk_mmsys_driver_data mt2701_mmsys_driver_data = {
164 .main_path = mt2701_mtk_ddp_main,
165 .main_len = ARRAY_SIZE(mt2701_mtk_ddp_main),
166 .ext_path = mt2701_mtk_ddp_ext,
167 .ext_len = ARRAY_SIZE(mt2701_mtk_ddp_ext),
168 .shadow_register = true,
169};
170
171static const struct mtk_mmsys_driver_data mt8173_mmsys_driver_data = {
172 .main_path = mt8173_mtk_ddp_main,
173 .main_len = ARRAY_SIZE(mt8173_mtk_ddp_main),
174 .ext_path = mt8173_mtk_ddp_ext,
175 .ext_len = ARRAY_SIZE(mt8173_mtk_ddp_ext),
176};
177
150static int mtk_drm_kms_init(struct drm_device *drm) 178static int mtk_drm_kms_init(struct drm_device *drm)
151{ 179{
152 struct mtk_drm_private *private = drm->dev_private; 180 struct mtk_drm_private *private = drm->dev_private;
@@ -189,17 +217,19 @@ static int mtk_drm_kms_init(struct drm_device *drm)
189 * and each statically assigned to a crtc: 217 * and each statically assigned to a crtc:
190 * OVL0 -> COLOR0 -> AAL -> OD -> RDMA0 -> UFOE -> DSI0 ... 218 * OVL0 -> COLOR0 -> AAL -> OD -> RDMA0 -> UFOE -> DSI0 ...
191 */ 219 */
192 ret = mtk_drm_crtc_create(drm, mtk_ddp_main, ARRAY_SIZE(mtk_ddp_main)); 220 ret = mtk_drm_crtc_create(drm, private->data->main_path,
221 private->data->main_len);
193 if (ret < 0) 222 if (ret < 0)
194 goto err_component_unbind; 223 goto err_component_unbind;
195 /* ... and OVL1 -> COLOR1 -> GAMMA -> RDMA1 -> DPI0. */ 224 /* ... and OVL1 -> COLOR1 -> GAMMA -> RDMA1 -> DPI0. */
196 ret = mtk_drm_crtc_create(drm, mtk_ddp_ext, ARRAY_SIZE(mtk_ddp_ext)); 225 ret = mtk_drm_crtc_create(drm, private->data->ext_path,
226 private->data->ext_len);
197 if (ret < 0) 227 if (ret < 0)
198 goto err_component_unbind; 228 goto err_component_unbind;
199 229
200 /* Use OVL device for all DMA memory allocations */ 230 /* Use OVL device for all DMA memory allocations */
201 np = private->comp_node[mtk_ddp_main[0]] ?: 231 np = private->comp_node[private->data->main_path[0]] ?:
202 private->comp_node[mtk_ddp_ext[0]]; 232 private->comp_node[private->data->ext_path[0]];
203 pdev = of_find_device_by_node(np); 233 pdev = of_find_device_by_node(np);
204 if (!pdev) { 234 if (!pdev) {
205 ret = -ENODEV; 235 ret = -ENODEV;
@@ -328,16 +358,22 @@ static const struct component_master_ops mtk_drm_ops = {
328}; 358};
329 359
330static const struct of_device_id mtk_ddp_comp_dt_ids[] = { 360static const struct of_device_id mtk_ddp_comp_dt_ids[] = {
361 { .compatible = "mediatek,mt2701-disp-ovl", .data = (void *)MTK_DISP_OVL },
331 { .compatible = "mediatek,mt8173-disp-ovl", .data = (void *)MTK_DISP_OVL }, 362 { .compatible = "mediatek,mt8173-disp-ovl", .data = (void *)MTK_DISP_OVL },
363 { .compatible = "mediatek,mt2701-disp-rdma", .data = (void *)MTK_DISP_RDMA },
332 { .compatible = "mediatek,mt8173-disp-rdma", .data = (void *)MTK_DISP_RDMA }, 364 { .compatible = "mediatek,mt8173-disp-rdma", .data = (void *)MTK_DISP_RDMA },
333 { .compatible = "mediatek,mt8173-disp-wdma", .data = (void *)MTK_DISP_WDMA }, 365 { .compatible = "mediatek,mt8173-disp-wdma", .data = (void *)MTK_DISP_WDMA },
366 { .compatible = "mediatek,mt2701-disp-color", .data = (void *)MTK_DISP_COLOR },
334 { .compatible = "mediatek,mt8173-disp-color", .data = (void *)MTK_DISP_COLOR }, 367 { .compatible = "mediatek,mt8173-disp-color", .data = (void *)MTK_DISP_COLOR },
335 { .compatible = "mediatek,mt8173-disp-aal", .data = (void *)MTK_DISP_AAL}, 368 { .compatible = "mediatek,mt8173-disp-aal", .data = (void *)MTK_DISP_AAL},
336 { .compatible = "mediatek,mt8173-disp-gamma", .data = (void *)MTK_DISP_GAMMA, }, 369 { .compatible = "mediatek,mt8173-disp-gamma", .data = (void *)MTK_DISP_GAMMA, },
337 { .compatible = "mediatek,mt8173-disp-ufoe", .data = (void *)MTK_DISP_UFOE }, 370 { .compatible = "mediatek,mt8173-disp-ufoe", .data = (void *)MTK_DISP_UFOE },
371 { .compatible = "mediatek,mt2701-dsi", .data = (void *)MTK_DSI },
338 { .compatible = "mediatek,mt8173-dsi", .data = (void *)MTK_DSI }, 372 { .compatible = "mediatek,mt8173-dsi", .data = (void *)MTK_DSI },
339 { .compatible = "mediatek,mt8173-dpi", .data = (void *)MTK_DPI }, 373 { .compatible = "mediatek,mt8173-dpi", .data = (void *)MTK_DPI },
374 { .compatible = "mediatek,mt2701-disp-mutex", .data = (void *)MTK_DISP_MUTEX },
340 { .compatible = "mediatek,mt8173-disp-mutex", .data = (void *)MTK_DISP_MUTEX }, 375 { .compatible = "mediatek,mt8173-disp-mutex", .data = (void *)MTK_DISP_MUTEX },
376 { .compatible = "mediatek,mt2701-disp-pwm", .data = (void *)MTK_DISP_BLS },
341 { .compatible = "mediatek,mt8173-disp-pwm", .data = (void *)MTK_DISP_PWM }, 377 { .compatible = "mediatek,mt8173-disp-pwm", .data = (void *)MTK_DISP_PWM },
342 { .compatible = "mediatek,mt8173-disp-od", .data = (void *)MTK_DISP_OD }, 378 { .compatible = "mediatek,mt8173-disp-od", .data = (void *)MTK_DISP_OD },
343 { } 379 { }
@@ -359,6 +395,7 @@ static int mtk_drm_probe(struct platform_device *pdev)
359 395
360 mutex_init(&private->commit.lock); 396 mutex_init(&private->commit.lock);
361 INIT_WORK(&private->commit.work, mtk_atomic_work); 397 INIT_WORK(&private->commit.work, mtk_atomic_work);
398 private->data = of_device_get_match_data(dev);
362 399
363 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 400 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
364 private->config_regs = devm_ioremap_resource(dev, mem); 401 private->config_regs = devm_ioremap_resource(dev, mem);
@@ -510,7 +547,10 @@ static SIMPLE_DEV_PM_OPS(mtk_drm_pm_ops, mtk_drm_sys_suspend,
510 mtk_drm_sys_resume); 547 mtk_drm_sys_resume);
511 548
512static const struct of_device_id mtk_drm_of_ids[] = { 549static const struct of_device_id mtk_drm_of_ids[] = {
513 { .compatible = "mediatek,mt8173-mmsys", }, 550 { .compatible = "mediatek,mt2701-mmsys",
551 .data = &mt2701_mmsys_driver_data},
552 { .compatible = "mediatek,mt8173-mmsys",
553 .data = &mt8173_mmsys_driver_data},
514 { } 554 { }
515}; 555};
516 556
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.h b/drivers/gpu/drm/mediatek/mtk_drm_drv.h
index df322a7a5fcb..aef8747d810b 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.h
@@ -28,6 +28,14 @@ struct drm_fb_helper;
28struct drm_property; 28struct drm_property;
29struct regmap; 29struct regmap;
30 30
31struct mtk_mmsys_driver_data {
32 const enum mtk_ddp_comp_id *main_path;
33 unsigned int main_len;
34 const enum mtk_ddp_comp_id *ext_path;
35 unsigned int ext_len;
36 bool shadow_register;
37};
38
31struct mtk_drm_private { 39struct mtk_drm_private {
32 struct drm_device *drm; 40 struct drm_device *drm;
33 struct device *dma_dev; 41 struct device *dma_dev;
@@ -39,6 +47,7 @@ struct mtk_drm_private {
39 void __iomem *config_regs; 47 void __iomem *config_regs;
40 struct device_node *comp_node[DDP_COMPONENT_ID_MAX]; 48 struct device_node *comp_node[DDP_COMPONENT_ID_MAX];
41 struct mtk_ddp_comp *ddp_comp[DDP_COMPONENT_ID_MAX]; 49 struct mtk_ddp_comp *ddp_comp[DDP_COMPONENT_ID_MAX];
50 const struct mtk_mmsys_driver_data *data;
42 51
43 struct { 52 struct {
44 struct drm_atomic_state *state; 53 struct drm_atomic_state *state;
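Note: the per-SoC pipeline description travels through the OF match table: each compatible entry carries a pointer in .data, and probe fetches it back with of_device_get_match_data(). A sketch of the retrieval side (example_probe is a hypothetical name):

	#include <linux/of_device.h>

	static int example_probe(struct platform_device *pdev)
	{
		const struct mtk_mmsys_driver_data *data;

		data = of_device_get_match_data(&pdev->dev);
		if (!data)	/* no .data on the matched compatible */
			return -EINVAL;

		/* data->main_path/main_len now describe the primary crtc */
		return 0;
	}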
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
index dd71cbb1a622..808b995a990f 100644
--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
@@ -16,22 +16,31 @@
16#include <drm/drm_crtc_helper.h> 16#include <drm/drm_crtc_helper.h>
17#include <drm/drm_mipi_dsi.h> 17#include <drm/drm_mipi_dsi.h>
18#include <drm/drm_panel.h> 18#include <drm/drm_panel.h>
19#include <drm/drm_of.h>
19#include <linux/clk.h> 20#include <linux/clk.h>
20#include <linux/component.h> 21#include <linux/component.h>
22#include <linux/irq.h>
21#include <linux/of.h> 23#include <linux/of.h>
22#include <linux/of_platform.h> 24#include <linux/of_platform.h>
23#include <linux/of_graph.h>
24#include <linux/phy/phy.h> 25#include <linux/phy/phy.h>
25#include <linux/platform_device.h> 26#include <linux/platform_device.h>
27#include <video/mipi_display.h>
26#include <video/videomode.h> 28#include <video/videomode.h>
27 29
28#include "mtk_drm_ddp_comp.h" 30#include "mtk_drm_ddp_comp.h"
29 31
30#define DSI_VIDEO_FIFO_DEPTH (1920 / 4)
31#define DSI_HOST_FIFO_DEPTH 64
32
33#define DSI_START 0x00 32#define DSI_START 0x00
34 33
34#define DSI_INTEN 0x08
35
36#define DSI_INTSTA 0x0c
37#define LPRX_RD_RDY_INT_FLAG BIT(0)
38#define CMD_DONE_INT_FLAG BIT(1)
39#define TE_RDY_INT_FLAG BIT(2)
40#define VM_DONE_INT_FLAG BIT(3)
41#define EXT_TE_RDY_INT_FLAG BIT(4)
42#define DSI_BUSY BIT(31)
43
35#define DSI_CON_CTRL 0x10 44#define DSI_CON_CTRL 0x10
36#define DSI_RESET BIT(0) 45#define DSI_RESET BIT(0)
37#define DSI_EN BIT(1) 46#define DSI_EN BIT(1)
@@ -46,7 +55,7 @@
46#define MIX_MODE BIT(17) 55#define MIX_MODE BIT(17)
47 56
48#define DSI_TXRX_CTRL 0x18 57#define DSI_TXRX_CTRL 0x18
49#define VC_NUM (2 << 0) 58#define VC_NUM BIT(1)
50#define LANE_NUM (0xf << 2) 59#define LANE_NUM (0xf << 2)
51#define DIS_EOT BIT(6) 60#define DIS_EOT BIT(6)
52#define NULL_EN BIT(7) 61#define NULL_EN BIT(7)
@@ -72,8 +81,19 @@
72#define DSI_HBP_WC 0x54 81#define DSI_HBP_WC 0x54
73#define DSI_HFP_WC 0x58 82#define DSI_HFP_WC 0x58
74 83
84#define DSI_CMDQ_SIZE 0x60
85#define CMDQ_SIZE 0x3f
86
75#define DSI_HSTX_CKL_WC 0x64 87#define DSI_HSTX_CKL_WC 0x64
76 88
89#define DSI_RX_DATA0 0x74
90#define DSI_RX_DATA1 0x78
91#define DSI_RX_DATA2 0x7c
92#define DSI_RX_DATA3 0x80
93
94#define DSI_RACK 0x84
95#define RACK BIT(0)
96
77#define DSI_PHY_LCCON 0x104 97#define DSI_PHY_LCCON 0x104
78#define LC_HS_TX_EN BIT(0) 98#define LC_HS_TX_EN BIT(0)
79#define LC_ULPM_EN BIT(1) 99#define LC_ULPM_EN BIT(1)
@@ -106,6 +126,19 @@
106#define CLK_HS_POST (0xff << 8) 126#define CLK_HS_POST (0xff << 8)
107#define CLK_HS_EXIT (0xff << 16) 127#define CLK_HS_EXIT (0xff << 16)
108 128
129#define DSI_VM_CMD_CON 0x130
130#define VM_CMD_EN BIT(0)
131#define TS_VFP_EN BIT(5)
132
133#define DSI_CMDQ0 0x180
134#define CONFIG (0xff << 0)
135#define SHORT_PACKET 0
136#define LONG_PACKET 2
137#define BTA BIT(2)
138#define DATA_ID (0xff << 8)
139#define DATA_0 (0xff << 16)
140#define DATA_1 (0xff << 24)
141
109#define T_LPX 5 142#define T_LPX 5
110#define T_HS_PREP 6 143#define T_HS_PREP 6
111#define T_HS_TRAIL 8 144#define T_HS_TRAIL 8
@@ -114,6 +147,12 @@
114 147
115#define NS_TO_CYCLE(n, c) ((n) / (c) + (((n) % (c)) ? 1 : 0)) 148#define NS_TO_CYCLE(n, c) ((n) / (c) + (((n) % (c)) ? 1 : 0))
116 149
150#define MTK_DSI_HOST_IS_READ(type) \
151 ((type == MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM) || \
152 (type == MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM) || \
153 (type == MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM) || \
154 (type == MIPI_DSI_DCS_READ))
155
117struct phy; 156struct phy;
118 157
119struct mtk_dsi { 158struct mtk_dsi {
@@ -140,6 +179,8 @@ struct mtk_dsi {
140 struct videomode vm; 179 struct videomode vm;
141 int refcount; 180 int refcount;
142 bool enabled; 181 bool enabled;
182 u32 irq_data;
183 wait_queue_head_t irq_wait_queue;
143}; 184};
144 185
145static inline struct mtk_dsi *encoder_to_dsi(struct drm_encoder *e) 186static inline struct mtk_dsi *encoder_to_dsi(struct drm_encoder *e)
@@ -164,7 +205,7 @@ static void mtk_dsi_mask(struct mtk_dsi *dsi, u32 offset, u32 mask, u32 data)
164 writel((temp & ~mask) | (data & mask), dsi->regs + offset); 205 writel((temp & ~mask) | (data & mask), dsi->regs + offset);
165} 206}
166 207
167static void dsi_phy_timconfig(struct mtk_dsi *dsi) 208static void mtk_dsi_phy_timconfig(struct mtk_dsi *dsi)
168{ 209{
169 u32 timcon0, timcon1, timcon2, timcon3; 210 u32 timcon0, timcon1, timcon2, timcon3;
170 u32 ui, cycle_time; 211 u32 ui, cycle_time;
@@ -196,118 +237,39 @@ static void mtk_dsi_disable(struct mtk_dsi *dsi)
196 mtk_dsi_mask(dsi, DSI_CON_CTRL, DSI_EN, 0); 237 mtk_dsi_mask(dsi, DSI_CON_CTRL, DSI_EN, 0);
197} 238}
198 239
199static void mtk_dsi_reset(struct mtk_dsi *dsi) 240static void mtk_dsi_reset_engine(struct mtk_dsi *dsi)
200{ 241{
201 mtk_dsi_mask(dsi, DSI_CON_CTRL, DSI_RESET, DSI_RESET); 242 mtk_dsi_mask(dsi, DSI_CON_CTRL, DSI_RESET, DSI_RESET);
202 mtk_dsi_mask(dsi, DSI_CON_CTRL, DSI_RESET, 0); 243 mtk_dsi_mask(dsi, DSI_CON_CTRL, DSI_RESET, 0);
203} 244}
204 245
205static int mtk_dsi_poweron(struct mtk_dsi *dsi) 246static void mtk_dsi_clk_ulp_mode_enter(struct mtk_dsi *dsi)
206{
207 struct device *dev = dsi->dev;
208 int ret;
209 u64 pixel_clock, total_bits;
210 u32 htotal, htotal_bits, bit_per_pixel, overhead_cycles, overhead_bits;
211
212 if (++dsi->refcount != 1)
213 return 0;
214
215 switch (dsi->format) {
216 case MIPI_DSI_FMT_RGB565:
217 bit_per_pixel = 16;
218 break;
219 case MIPI_DSI_FMT_RGB666_PACKED:
220 bit_per_pixel = 18;
221 break;
222 case MIPI_DSI_FMT_RGB666:
223 case MIPI_DSI_FMT_RGB888:
224 default:
225 bit_per_pixel = 24;
226 break;
227 }
228
229 /**
230 * vm.pixelclock is in kHz, pixel_clock unit is Hz, so multiply by 1000
231 * htotal_time = htotal * byte_per_pixel / num_lanes
232 * overhead_time = lpx + hs_prepare + hs_zero + hs_trail + hs_exit
233 * mipi_ratio = (htotal_time + overhead_time) / htotal_time
234 * data_rate = pixel_clock * bit_per_pixel * mipi_ratio / num_lanes;
235 */
236 pixel_clock = dsi->vm.pixelclock * 1000;
237 htotal = dsi->vm.hactive + dsi->vm.hback_porch + dsi->vm.hfront_porch +
238 dsi->vm.hsync_len;
239 htotal_bits = htotal * bit_per_pixel;
240
241 overhead_cycles = T_LPX + T_HS_PREP + T_HS_ZERO + T_HS_TRAIL +
242 T_HS_EXIT;
243 overhead_bits = overhead_cycles * dsi->lanes * 8;
244 total_bits = htotal_bits + overhead_bits;
245
246 dsi->data_rate = DIV_ROUND_UP_ULL(pixel_clock * total_bits,
247 htotal * dsi->lanes);
248
249 ret = clk_set_rate(dsi->hs_clk, dsi->data_rate);
250 if (ret < 0) {
251 dev_err(dev, "Failed to set data rate: %d\n", ret);
252 goto err_refcount;
253 }
254
255 phy_power_on(dsi->phy);
256
257 ret = clk_prepare_enable(dsi->engine_clk);
258 if (ret < 0) {
259 dev_err(dev, "Failed to enable engine clock: %d\n", ret);
260 goto err_phy_power_off;
261 }
262
263 ret = clk_prepare_enable(dsi->digital_clk);
264 if (ret < 0) {
265 dev_err(dev, "Failed to enable digital clock: %d\n", ret);
266 goto err_disable_engine_clk;
267 }
268
269 mtk_dsi_enable(dsi);
270 mtk_dsi_reset(dsi);
271 dsi_phy_timconfig(dsi);
272
273 return 0;
274
275err_disable_engine_clk:
276 clk_disable_unprepare(dsi->engine_clk);
277err_phy_power_off:
278 phy_power_off(dsi->phy);
279err_refcount:
280 dsi->refcount--;
281 return ret;
282}
283
284static void dsi_clk_ulp_mode_enter(struct mtk_dsi *dsi)
285{ 247{
286 mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_HS_TX_EN, 0); 248 mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_HS_TX_EN, 0);
287 mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_ULPM_EN, 0); 249 mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_ULPM_EN, 0);
288} 250}
289 251
290static void dsi_clk_ulp_mode_leave(struct mtk_dsi *dsi) 252static void mtk_dsi_clk_ulp_mode_leave(struct mtk_dsi *dsi)
291{ 253{
292 mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_ULPM_EN, 0); 254 mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_ULPM_EN, 0);
293 mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_WAKEUP_EN, LC_WAKEUP_EN); 255 mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_WAKEUP_EN, LC_WAKEUP_EN);
294 mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_WAKEUP_EN, 0); 256 mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_WAKEUP_EN, 0);
295} 257}
296 258
297static void dsi_lane0_ulp_mode_enter(struct mtk_dsi *dsi) 259static void mtk_dsi_lane0_ulp_mode_enter(struct mtk_dsi *dsi)
298{ 260{
299 mtk_dsi_mask(dsi, DSI_PHY_LD0CON, LD0_HS_TX_EN, 0); 261 mtk_dsi_mask(dsi, DSI_PHY_LD0CON, LD0_HS_TX_EN, 0);
300 mtk_dsi_mask(dsi, DSI_PHY_LD0CON, LD0_ULPM_EN, 0); 262 mtk_dsi_mask(dsi, DSI_PHY_LD0CON, LD0_ULPM_EN, 0);
301} 263}
302 264
303static void dsi_lane0_ulp_mode_leave(struct mtk_dsi *dsi) 265static void mtk_dsi_lane0_ulp_mode_leave(struct mtk_dsi *dsi)
304{ 266{
305 mtk_dsi_mask(dsi, DSI_PHY_LD0CON, LD0_ULPM_EN, 0); 267 mtk_dsi_mask(dsi, DSI_PHY_LD0CON, LD0_ULPM_EN, 0);
306 mtk_dsi_mask(dsi, DSI_PHY_LD0CON, LD0_WAKEUP_EN, LD0_WAKEUP_EN); 268 mtk_dsi_mask(dsi, DSI_PHY_LD0CON, LD0_WAKEUP_EN, LD0_WAKEUP_EN);
307 mtk_dsi_mask(dsi, DSI_PHY_LD0CON, LD0_WAKEUP_EN, 0); 269 mtk_dsi_mask(dsi, DSI_PHY_LD0CON, LD0_WAKEUP_EN, 0);
308} 270}
309 271
310static bool dsi_clk_hs_state(struct mtk_dsi *dsi) 272static bool mtk_dsi_clk_hs_state(struct mtk_dsi *dsi)
311{ 273{
312 u32 tmp_reg1; 274 u32 tmp_reg1;
313 275
@@ -315,30 +277,37 @@ static bool dsi_clk_hs_state(struct mtk_dsi *dsi)
315 return ((tmp_reg1 & LC_HS_TX_EN) == 1) ? true : false; 277 return ((tmp_reg1 & LC_HS_TX_EN) == 1) ? true : false;
316} 278}
317 279
318static void dsi_clk_hs_mode(struct mtk_dsi *dsi, bool enter) 280static void mtk_dsi_clk_hs_mode(struct mtk_dsi *dsi, bool enter)
319{ 281{
320 if (enter && !dsi_clk_hs_state(dsi)) 282 if (enter && !mtk_dsi_clk_hs_state(dsi))
321 mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_HS_TX_EN, LC_HS_TX_EN); 283 mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_HS_TX_EN, LC_HS_TX_EN);
322 else if (!enter && dsi_clk_hs_state(dsi)) 284 else if (!enter && mtk_dsi_clk_hs_state(dsi))
323 mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_HS_TX_EN, 0); 285 mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_HS_TX_EN, 0);
324} 286}
325 287
326static void dsi_set_mode(struct mtk_dsi *dsi) 288static void mtk_dsi_set_mode(struct mtk_dsi *dsi)
327{ 289{
328 u32 vid_mode = CMD_MODE; 290 u32 vid_mode = CMD_MODE;
329 291
330 if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO) { 292 if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO) {
331 vid_mode = SYNC_PULSE_MODE; 293 if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST)
332
333 if ((dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST) &&
334 !(dsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE))
335 vid_mode = BURST_MODE; 294 vid_mode = BURST_MODE;
295 else if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
296 vid_mode = SYNC_PULSE_MODE;
297 else
298 vid_mode = SYNC_EVENT_MODE;
336 } 299 }
337 300
338 writel(vid_mode, dsi->regs + DSI_MODE_CTRL); 301 writel(vid_mode, dsi->regs + DSI_MODE_CTRL);
339} 302}
340 303
341static void dsi_ps_control_vact(struct mtk_dsi *dsi) 304static void mtk_dsi_set_vm_cmd(struct mtk_dsi *dsi)
305{
306 mtk_dsi_mask(dsi, DSI_VM_CMD_CON, VM_CMD_EN, VM_CMD_EN);
307 mtk_dsi_mask(dsi, DSI_VM_CMD_CON, TS_VFP_EN, TS_VFP_EN);
308}
309
310static void mtk_dsi_ps_control_vact(struct mtk_dsi *dsi)
342{ 311{
343 struct videomode *vm = &dsi->vm; 312 struct videomode *vm = &dsi->vm;
344 u32 dsi_buf_bpp, ps_wc; 313 u32 dsi_buf_bpp, ps_wc;
@@ -372,7 +341,7 @@ static void dsi_ps_control_vact(struct mtk_dsi *dsi)
372 writel(ps_wc, dsi->regs + DSI_HSTX_CKL_WC); 341 writel(ps_wc, dsi->regs + DSI_HSTX_CKL_WC);
373} 342}
374 343
375static void dsi_rxtx_control(struct mtk_dsi *dsi) 344static void mtk_dsi_rxtx_control(struct mtk_dsi *dsi)
376{ 345{
377 u32 tmp_reg; 346 u32 tmp_reg;
378 347
@@ -394,12 +363,15 @@ static void dsi_rxtx_control(struct mtk_dsi *dsi)
394 break; 363 break;
395 } 364 }
396 365
366 tmp_reg |= (dsi->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS) << 6;
367 tmp_reg |= (dsi->mode_flags & MIPI_DSI_MODE_EOT_PACKET) >> 3;
368
397 writel(tmp_reg, dsi->regs + DSI_TXRX_CTRL); 369 writel(tmp_reg, dsi->regs + DSI_TXRX_CTRL);
398} 370}
399 371
400static void dsi_ps_control(struct mtk_dsi *dsi) 372static void mtk_dsi_ps_control(struct mtk_dsi *dsi)
401{ 373{
402 unsigned int dsi_tmp_buf_bpp; 374 u32 dsi_tmp_buf_bpp;
403 u32 tmp_reg; 375 u32 tmp_reg;
404 376
405 switch (dsi->format) { 377 switch (dsi->format) {
@@ -429,12 +401,12 @@ static void dsi_ps_control(struct mtk_dsi *dsi)
429 writel(tmp_reg, dsi->regs + DSI_PSCTRL); 401 writel(tmp_reg, dsi->regs + DSI_PSCTRL);
430} 402}
431 403
432static void dsi_config_vdo_timing(struct mtk_dsi *dsi) 404static void mtk_dsi_config_vdo_timing(struct mtk_dsi *dsi)
433{ 405{
434 unsigned int horizontal_sync_active_byte; 406 u32 horizontal_sync_active_byte;
435 unsigned int horizontal_backporch_byte; 407 u32 horizontal_backporch_byte;
436 unsigned int horizontal_frontporch_byte; 408 u32 horizontal_frontporch_byte;
437 unsigned int dsi_tmp_buf_bpp; 409 u32 dsi_tmp_buf_bpp;
438 410
439 struct videomode *vm = &dsi->vm; 411 struct videomode *vm = &dsi->vm;
440 412
@@ -463,7 +435,7 @@ static void dsi_config_vdo_timing(struct mtk_dsi *dsi)
463 writel(horizontal_backporch_byte, dsi->regs + DSI_HBP_WC); 435 writel(horizontal_backporch_byte, dsi->regs + DSI_HBP_WC);
464 writel(horizontal_frontporch_byte, dsi->regs + DSI_HFP_WC); 436 writel(horizontal_frontporch_byte, dsi->regs + DSI_HFP_WC);
465 437
466 dsi_ps_control(dsi); 438 mtk_dsi_ps_control(dsi);
467} 439}
468 440
469static void mtk_dsi_start(struct mtk_dsi *dsi) 441static void mtk_dsi_start(struct mtk_dsi *dsi)
@@ -472,6 +444,184 @@ static void mtk_dsi_start(struct mtk_dsi *dsi)
472 writel(1, dsi->regs + DSI_START); 444 writel(1, dsi->regs + DSI_START);
473} 445}
474 446
447static void mtk_dsi_stop(struct mtk_dsi *dsi)
448{
449 writel(0, dsi->regs + DSI_START);
450}
451
452static void mtk_dsi_set_cmd_mode(struct mtk_dsi *dsi)
453{
454 writel(CMD_MODE, dsi->regs + DSI_MODE_CTRL);
455}
456
457static void mtk_dsi_set_interrupt_enable(struct mtk_dsi *dsi)
458{
459 u32 inten = LPRX_RD_RDY_INT_FLAG | CMD_DONE_INT_FLAG | VM_DONE_INT_FLAG;
460
461 writel(inten, dsi->regs + DSI_INTEN);
462}
463
464static void mtk_dsi_irq_data_set(struct mtk_dsi *dsi, u32 irq_bit)
465{
466 dsi->irq_data |= irq_bit;
467}
468
469static void mtk_dsi_irq_data_clear(struct mtk_dsi *dsi, u32 irq_bit)
470{
471 dsi->irq_data &= ~irq_bit;
472}
473
474static s32 mtk_dsi_wait_for_irq_done(struct mtk_dsi *dsi, u32 irq_flag,
475 unsigned int timeout)
476{
477 s32 ret = 0;
478 unsigned long jiffies = msecs_to_jiffies(timeout);
479
480 ret = wait_event_interruptible_timeout(dsi->irq_wait_queue,
481 dsi->irq_data & irq_flag,
482 jiffies);
483 if (ret == 0) {
484		DRM_WARN("timed out waiting for DSI IRQ 0x%08x\n", irq_flag);
485
486 mtk_dsi_enable(dsi);
487 mtk_dsi_reset_engine(dsi);
488 }
489
490 return ret;
491}
492
493static irqreturn_t mtk_dsi_irq(int irq, void *dev_id)
494{
495 struct mtk_dsi *dsi = dev_id;
496 u32 status, tmp;
497 u32 flag = LPRX_RD_RDY_INT_FLAG | CMD_DONE_INT_FLAG | VM_DONE_INT_FLAG;
498
499 status = readl(dsi->regs + DSI_INTSTA) & flag;
500
501 if (status) {
502 do {
503 mtk_dsi_mask(dsi, DSI_RACK, RACK, RACK);
504 tmp = readl(dsi->regs + DSI_INTSTA);
505 } while (tmp & DSI_BUSY);
506
507 mtk_dsi_mask(dsi, DSI_INTSTA, status, 0);
508 mtk_dsi_irq_data_set(dsi, status);
509 wake_up_interruptible(&dsi->irq_wait_queue);
510 }
511
512 return IRQ_HANDLED;
513}
514
515static s32 mtk_dsi_switch_to_cmd_mode(struct mtk_dsi *dsi, u8 irq_flag, u32 t)
516{
517 mtk_dsi_irq_data_clear(dsi, irq_flag);
518 mtk_dsi_set_cmd_mode(dsi);
519
520 if (!mtk_dsi_wait_for_irq_done(dsi, irq_flag, t)) {
521		DRM_ERROR("failed to switch to cmd mode\n");
522 return -ETIME;
523 } else {
524 return 0;
525 }
526}
527
528static int mtk_dsi_poweron(struct mtk_dsi *dsi)
529{
530 struct device *dev = dsi->dev;
531 int ret;
532 u64 pixel_clock, total_bits;
533 u32 htotal, htotal_bits, bit_per_pixel, overhead_cycles, overhead_bits;
534
535 if (++dsi->refcount != 1)
536 return 0;
537
538 switch (dsi->format) {
539 case MIPI_DSI_FMT_RGB565:
540 bit_per_pixel = 16;
541 break;
542 case MIPI_DSI_FMT_RGB666_PACKED:
543 bit_per_pixel = 18;
544 break;
545 case MIPI_DSI_FMT_RGB666:
546 case MIPI_DSI_FMT_RGB888:
547 default:
548 bit_per_pixel = 24;
549 break;
550 }
551
552 /**
553 * vm.pixelclock is in kHz, pixel_clock unit is Hz, so multiply by 1000
554 * htotal_time = htotal * byte_per_pixel / num_lanes
555 * overhead_time = lpx + hs_prepare + hs_zero + hs_trail + hs_exit
556 * mipi_ratio = (htotal_time + overhead_time) / htotal_time
557 * data_rate = pixel_clock * bit_per_pixel * mipi_ratio / num_lanes;
558 */
559 pixel_clock = dsi->vm.pixelclock * 1000;
560 htotal = dsi->vm.hactive + dsi->vm.hback_porch + dsi->vm.hfront_porch +
561 dsi->vm.hsync_len;
562 htotal_bits = htotal * bit_per_pixel;
563
564 overhead_cycles = T_LPX + T_HS_PREP + T_HS_ZERO + T_HS_TRAIL +
565 T_HS_EXIT;
566 overhead_bits = overhead_cycles * dsi->lanes * 8;
567 total_bits = htotal_bits + overhead_bits;
568
569 dsi->data_rate = DIV_ROUND_UP_ULL(pixel_clock * total_bits,
570 htotal * dsi->lanes);
571
572 ret = clk_set_rate(dsi->hs_clk, dsi->data_rate);
573 if (ret < 0) {
574 dev_err(dev, "Failed to set data rate: %d\n", ret);
575 goto err_refcount;
576 }
577
578 phy_power_on(dsi->phy);
579
580 ret = clk_prepare_enable(dsi->engine_clk);
581 if (ret < 0) {
582 dev_err(dev, "Failed to enable engine clock: %d\n", ret);
583 goto err_phy_power_off;
584 }
585
586 ret = clk_prepare_enable(dsi->digital_clk);
587 if (ret < 0) {
588 dev_err(dev, "Failed to enable digital clock: %d\n", ret);
589 goto err_disable_engine_clk;
590 }
591
592 mtk_dsi_enable(dsi);
593 mtk_dsi_reset_engine(dsi);
594 mtk_dsi_phy_timconfig(dsi);
595
596 mtk_dsi_rxtx_control(dsi);
597 mtk_dsi_ps_control_vact(dsi);
598 mtk_dsi_set_vm_cmd(dsi);
599 mtk_dsi_config_vdo_timing(dsi);
600 mtk_dsi_set_interrupt_enable(dsi);
601
602 mtk_dsi_clk_ulp_mode_leave(dsi);
603 mtk_dsi_lane0_ulp_mode_leave(dsi);
604 mtk_dsi_clk_hs_mode(dsi, 0);
605
606 if (dsi->panel) {
607 if (drm_panel_prepare(dsi->panel)) {
608 DRM_ERROR("failed to prepare the panel\n");
609 goto err_disable_digital_clk;
610 }
611 }
612
613 return 0;
614err_disable_digital_clk:
615 clk_disable_unprepare(dsi->digital_clk);
616err_disable_engine_clk:
617 clk_disable_unprepare(dsi->engine_clk);
618err_phy_power_off:
619 phy_power_off(dsi->phy);
620err_refcount:
621 dsi->refcount--;
622 return ret;
623}
624
475static void mtk_dsi_poweroff(struct mtk_dsi *dsi) 625static void mtk_dsi_poweroff(struct mtk_dsi *dsi)
476{ 626{
477 if (WARN_ON(dsi->refcount == 0)) 627 if (WARN_ON(dsi->refcount == 0))
@@ -480,8 +630,18 @@ static void mtk_dsi_poweroff(struct mtk_dsi *dsi)
480 if (--dsi->refcount != 0) 630 if (--dsi->refcount != 0)
481 return; 631 return;
482 632
483 dsi_lane0_ulp_mode_enter(dsi); 633 if (!mtk_dsi_switch_to_cmd_mode(dsi, VM_DONE_INT_FLAG, 500)) {
484 dsi_clk_ulp_mode_enter(dsi); 634 if (dsi->panel) {
635 if (drm_panel_unprepare(dsi->panel)) {
636 DRM_ERROR("failed to unprepare the panel\n");
637 return;
638 }
639 }
640 }
641
642 mtk_dsi_reset_engine(dsi);
643 mtk_dsi_lane0_ulp_mode_enter(dsi);
644 mtk_dsi_clk_ulp_mode_enter(dsi);
485 645
486 mtk_dsi_disable(dsi); 646 mtk_dsi_disable(dsi);
487 647
@@ -498,35 +658,30 @@ static void mtk_output_dsi_enable(struct mtk_dsi *dsi)
498 if (dsi->enabled) 658 if (dsi->enabled)
499 return; 659 return;
500 660
501 if (dsi->panel) {
502 if (drm_panel_prepare(dsi->panel)) {
503 DRM_ERROR("failed to setup the panel\n");
504 return;
505 }
506 }
507
508 ret = mtk_dsi_poweron(dsi); 661 ret = mtk_dsi_poweron(dsi);
509 if (ret < 0) { 662 if (ret < 0) {
510 DRM_ERROR("failed to power on dsi\n"); 663 DRM_ERROR("failed to power on dsi\n");
511 return; 664 return;
512 } 665 }
513 666
514 dsi_rxtx_control(dsi); 667 mtk_dsi_set_mode(dsi);
515 668 mtk_dsi_clk_hs_mode(dsi, 1);
516 dsi_clk_ulp_mode_leave(dsi);
517 dsi_lane0_ulp_mode_leave(dsi);
518 dsi_clk_hs_mode(dsi, 0);
519 dsi_set_mode(dsi);
520
521 dsi_ps_control_vact(dsi);
522 dsi_config_vdo_timing(dsi);
523
524 dsi_set_mode(dsi);
525 dsi_clk_hs_mode(dsi, 1);
526 669
527 mtk_dsi_start(dsi); 670 mtk_dsi_start(dsi);
528 671
672 if (dsi->panel) {
673 if (drm_panel_enable(dsi->panel)) {
674 DRM_ERROR("failed to enable the panel\n");
675 goto err_dsi_power_off;
676 }
677 }
678
529 dsi->enabled = true; 679 dsi->enabled = true;
680
681 return;
682err_dsi_power_off:
683 mtk_dsi_stop(dsi);
684 mtk_dsi_poweroff(dsi);
530} 685}
531 686
532static void mtk_output_dsi_disable(struct mtk_dsi *dsi) 687static void mtk_output_dsi_disable(struct mtk_dsi *dsi)
@@ -541,6 +696,7 @@ static void mtk_output_dsi_disable(struct mtk_dsi *dsi)
541 } 696 }
542 } 697 }
543 698
699 mtk_dsi_stop(dsi);
544 mtk_dsi_poweroff(dsi); 700 mtk_dsi_poweroff(dsi);
545 701
546 dsi->enabled = false; 702 dsi->enabled = false;
@@ -742,9 +898,149 @@ static int mtk_dsi_host_detach(struct mipi_dsi_host *host,
742 return 0; 898 return 0;
743} 899}
744 900
901static void mtk_dsi_wait_for_idle(struct mtk_dsi *dsi)
902{
903	s32 timeout_ms = 500000; /* total 1s ~ 2s timeout */
904
905 while (timeout_ms--) {
906 if (!(readl(dsi->regs + DSI_INTSTA) & DSI_BUSY))
907 break;
908
909 usleep_range(2, 4);
910 }
911
912	if (timeout_ms < 0) {	/* counter went negative: poll loop exhausted */
913		DRM_WARN("timed out waiting for dsi to become idle\n");
914
915 mtk_dsi_enable(dsi);
916 mtk_dsi_reset_engine(dsi);
917 }
918}
919
920static u32 mtk_dsi_recv_cnt(u8 type, u8 *read_data)
921{
922 switch (type) {
923 case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_1BYTE:
924 case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE:
925 return 1;
926 case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_2BYTE:
927 case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_2BYTE:
928 return 2;
929 case MIPI_DSI_RX_GENERIC_LONG_READ_RESPONSE:
930 case MIPI_DSI_RX_DCS_LONG_READ_RESPONSE:
931		return read_data[1] + (read_data[2] << 8); /* 16-bit word count, LSB first */
932 case MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT:
933 DRM_INFO("type is 0x02, try again\n");
934 break;
935 default:
936		DRM_INFO("type 0x%x is not recognized\n", type);
937 break;
938 }
939
940 return 0;
941}
942
943static void mtk_dsi_cmdq(struct mtk_dsi *dsi, const struct mipi_dsi_msg *msg)
944{
945 const char *tx_buf = msg->tx_buf;
946 u8 config, cmdq_size, cmdq_off, type = msg->type;
947 u32 reg_val, cmdq_mask, i;
948
949 if (MTK_DSI_HOST_IS_READ(type))
950 config = BTA;
951 else
952 config = (msg->tx_len > 2) ? LONG_PACKET : SHORT_PACKET;
953
954 if (msg->tx_len > 2) {
955 cmdq_size = 1 + (msg->tx_len + 3) / 4;
956 cmdq_off = 4;
957 cmdq_mask = CONFIG | DATA_ID | DATA_0 | DATA_1;
958 reg_val = (msg->tx_len << 16) | (type << 8) | config;
959 } else {
960 cmdq_size = 1;
961 cmdq_off = 2;
962 cmdq_mask = CONFIG | DATA_ID;
963 reg_val = (type << 8) | config;
964 }
965
966 for (i = 0; i < msg->tx_len; i++)
967 writeb(tx_buf[i], dsi->regs + DSI_CMDQ0 + cmdq_off + i);
968
969 mtk_dsi_mask(dsi, DSI_CMDQ0, cmdq_mask, reg_val);
970 mtk_dsi_mask(dsi, DSI_CMDQ_SIZE, CMDQ_SIZE, cmdq_size);
971}
972
973static ssize_t mtk_dsi_host_send_cmd(struct mtk_dsi *dsi,
974 const struct mipi_dsi_msg *msg, u8 flag)
975{
976 mtk_dsi_wait_for_idle(dsi);
977 mtk_dsi_irq_data_clear(dsi, flag);
978 mtk_dsi_cmdq(dsi, msg);
979 mtk_dsi_start(dsi);
980
981 if (!mtk_dsi_wait_for_irq_done(dsi, flag, 2000))
982 return -ETIME;
983 else
984 return 0;
985}
986
987static ssize_t mtk_dsi_host_transfer(struct mipi_dsi_host *host,
988 const struct mipi_dsi_msg *msg)
989{
990 struct mtk_dsi *dsi = host_to_dsi(host);
991 u32 recv_cnt, i;
992 u8 read_data[16];
993 void *src_addr;
994 u8 irq_flag = CMD_DONE_INT_FLAG;
995
996 if (readl(dsi->regs + DSI_MODE_CTRL) & MODE) {
997		DRM_ERROR("dsi engine is not in command mode\n");
998 return -EINVAL;
999 }
1000
1001 if (MTK_DSI_HOST_IS_READ(msg->type))
1002 irq_flag |= LPRX_RD_RDY_INT_FLAG;
1003
1004 if (mtk_dsi_host_send_cmd(dsi, msg, irq_flag) < 0)
1005 return -ETIME;
1006
1007 if (!MTK_DSI_HOST_IS_READ(msg->type))
1008 return 0;
1009
1010 if (!msg->rx_buf) {
1011		DRM_ERROR("dsi receive buffer is NULL\n");
1012 return -EINVAL;
1013 }
1014
1015 for (i = 0; i < 16; i++)
1016 *(read_data + i) = readb(dsi->regs + DSI_RX_DATA0 + i);
1017
1018 recv_cnt = mtk_dsi_recv_cnt(read_data[0], read_data);
1019
1020 if (recv_cnt > 2)
1021 src_addr = &read_data[4];
1022 else
1023 src_addr = &read_data[1];
1024
1025 if (recv_cnt > 10)
1026 recv_cnt = 10;
1027
1028 if (recv_cnt > msg->rx_len)
1029 recv_cnt = msg->rx_len;
1030
1031 if (recv_cnt)
1032 memcpy(msg->rx_buf, src_addr, recv_cnt);
1033
1034	DRM_INFO("dsi read %d bytes from the panel at address 0x%x\n",
1035 recv_cnt, *((u8 *)(msg->tx_buf)));
1036
1037 return recv_cnt;
1038}
1039
745static const struct mipi_dsi_host_ops mtk_dsi_ops = { 1040static const struct mipi_dsi_host_ops mtk_dsi_ops = {
746 .attach = mtk_dsi_host_attach, 1041 .attach = mtk_dsi_host_attach,
747 .detach = mtk_dsi_host_detach, 1042 .detach = mtk_dsi_host_detach,
1043 .transfer = mtk_dsi_host_transfer,
748}; 1044};
749 1045
750static int mtk_dsi_bind(struct device *dev, struct device *master, void *data) 1046static int mtk_dsi_bind(struct device *dev, struct device *master, void *data)
@@ -801,8 +1097,8 @@ static int mtk_dsi_probe(struct platform_device *pdev)
801{ 1097{
802 struct mtk_dsi *dsi; 1098 struct mtk_dsi *dsi;
803 struct device *dev = &pdev->dev; 1099 struct device *dev = &pdev->dev;
804 struct device_node *remote_node, *endpoint;
805 struct resource *regs; 1100 struct resource *regs;
1101 int irq_num;
806 int comp_id; 1102 int comp_id;
807 int ret; 1103 int ret;
808 1104
@@ -813,22 +1109,10 @@ static int mtk_dsi_probe(struct platform_device *pdev)
813 dsi->host.ops = &mtk_dsi_ops; 1109 dsi->host.ops = &mtk_dsi_ops;
814 dsi->host.dev = dev; 1110 dsi->host.dev = dev;
815 1111
816 endpoint = of_graph_get_next_endpoint(dev->of_node, NULL); 1112 ret = drm_of_find_panel_or_bridge(dev->of_node, 0, 0,
817 if (endpoint) { 1113 &dsi->panel, &dsi->bridge);
818 remote_node = of_graph_get_remote_port_parent(endpoint); 1114 if (ret)
819 if (!remote_node) { 1115 return ret;
820 dev_err(dev, "No panel connected\n");
821 return -ENODEV;
822 }
823
824 dsi->bridge = of_drm_find_bridge(remote_node);
825 dsi->panel = of_drm_find_panel(remote_node);
826 of_node_put(remote_node);
827 if (!dsi->bridge && !dsi->panel) {
828 dev_info(dev, "Waiting for bridge or panel driver\n");
829 return -EPROBE_DEFER;
830 }
831 }
832 1116
833 dsi->engine_clk = devm_clk_get(dev, "engine"); 1117 dsi->engine_clk = devm_clk_get(dev, "engine");
834 if (IS_ERR(dsi->engine_clk)) { 1118 if (IS_ERR(dsi->engine_clk)) {
@@ -879,6 +1163,22 @@ static int mtk_dsi_probe(struct platform_device *pdev)
879 return ret; 1163 return ret;
880 } 1164 }
881 1165
1166 irq_num = platform_get_irq(pdev, 0);
1167 if (irq_num < 0) {
1168 dev_err(&pdev->dev, "failed to request dsi irq resource\n");
1169		return irq_num; /* propagate the real error */
1170 }
1171
1172 irq_set_status_flags(irq_num, IRQ_TYPE_LEVEL_LOW);
1173 ret = devm_request_irq(&pdev->dev, irq_num, mtk_dsi_irq,
1174 IRQF_TRIGGER_LOW, dev_name(&pdev->dev), dsi);
1175 if (ret) {
1176 dev_err(&pdev->dev, "failed to request mediatek dsi irq\n");
1177		return ret; /* propagate the real error */
1178 }
1179
1180 init_waitqueue_head(&dsi->irq_wait_queue);
1181
882 platform_set_drvdata(pdev, dsi); 1182 platform_set_drvdata(pdev, dsi);
883 1183
884 return component_add(&pdev->dev, &mtk_dsi_component_ops); 1184 return component_add(&pdev->dev, &mtk_dsi_component_ops);
@@ -895,6 +1195,7 @@ static int mtk_dsi_remove(struct platform_device *pdev)
1195}
1196
1197static const struct of_device_id mtk_dsi_of_match[] = {
1198 { .compatible = "mediatek,mt2701-dsi" },
1199 { .compatible = "mediatek,mt8173-dsi" },
1200 { },
1201};
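With a .transfer hook wired up, panel drivers sitting on this DSI host can use the standard DCS helpers, and the read path above is what ultimately services them. A minimal sketch of such a read; the panel function is hypothetical, only mipi_dsi_dcs_read() is standard API:

#include <drm/drm_mipi_dsi.h>

/* Hypothetical panel helper: a 3-byte DCS read serviced by
 * mtk_dsi_host_transfer(); reads longer than 10 bytes would be
 * truncated by the RX FIFO clamp above. */
static int example_panel_read_id(struct mipi_dsi_device *dsi)
{
	u8 id[3];
	ssize_t ret;

	ret = mipi_dsi_dcs_read(dsi, 0x04 /* DCS GET_DISPLAY_ID */,
				id, sizeof(id));

	return ret < 0 ? ret : 0;
}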
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c
index c26251260b83..41a1c03b0347 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi.c
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c
@@ -1434,7 +1434,7 @@ static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi,
1434{
1435 struct device *dev = &pdev->dev;
1436 struct device_node *np = dev->of_node;
1437 struct device_node *cec_np, *port, *ep, *remote, *i2c_np;
1437 struct device_node *cec_np, *remote, *i2c_np;
1438 struct platform_device *cec_pdev;
1439 struct regmap *regmap;
1440 struct resource *mem;
@@ -1486,29 +1486,9 @@ static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi,
1486 if (IS_ERR(hdmi->regs))
1487 return PTR_ERR(hdmi->regs);
1488
1489 port = of_graph_get_port_by_id(np, 1);
1490 if (!port) {
1491 dev_err(dev, "Missing output port node\n");
1492 return -EINVAL;
1493 }
1494
1495 ep = of_get_child_by_name(port, "endpoint");
1496 if (!ep) {
1497 dev_err(dev, "Missing endpoint node in port %s\n",
1498 port->full_name);
1499 of_node_put(port);
1500 return -EINVAL;
1501 }
1502 of_node_put(port);
1503
1504 remote = of_graph_get_remote_port_parent(ep);
1505 if (!remote) {
1506 dev_err(dev, "Missing connector/bridge node for endpoint %s\n",
1507 ep->full_name);
1508 of_node_put(ep);
1509 return -EINVAL;
1510 }
1511 of_node_put(ep);
1489 remote = of_graph_get_remote_node(np, 1, 0);
1490 if (!remote)
1491 return -EINVAL;
1492
1493 if (!of_device_is_compatible(remote, "hdmi-connector")) {
1494 hdmi->next_bridge = of_drm_find_bridge(remote);
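The rewrite above leans on of_graph_get_remote_node(), which folds the port -> endpoint -> remote-parent walk, and all of the of_node_put() bookkeeping on the intermediate nodes, into a single call. A minimal sketch of the resulting caller pattern:

#include <linux/of_graph.h>

/* Sketch: look up whatever is connected to port@1/endpoint@0 of np.
 * The helper returns NULL on any failure and holds a reference on
 * success, so the caller still owes one of_node_put(). */
static struct device_node *example_get_hdmi_remote(struct device_node *np)
{
	return of_graph_get_remote_node(np, 1, 0);
}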
diff --git a/drivers/gpu/drm/mediatek/mtk_mipi_tx.c b/drivers/gpu/drm/mediatek/mtk_mipi_tx.c
index 1c366f8cb2d0..90e913108950 100644
--- a/drivers/gpu/drm/mediatek/mtk_mipi_tx.c
+++ b/drivers/gpu/drm/mediatek/mtk_mipi_tx.c
@@ -16,6 +16,7 @@
16#include <linux/delay.h>
17#include <linux/io.h>
18#include <linux/module.h>
19#include <linux/of_device.h>
20#include <linux/platform_device.h>
21#include <linux/phy/phy.h>
22
@@ -87,6 +88,9 @@
88
89#define MIPITX_DSI_PLL_CON2 0x58
90
91#define MIPITX_DSI_PLL_TOP 0x64
92#define RG_DSI_MPPLL_PRESERVE (0xff << 8)
93
94#define MIPITX_DSI_PLL_PWR 0x68
95#define RG_DSI_MPPLL_SDM_PWR_ON BIT(0)
96#define RG_DSI_MPPLL_SDM_ISO_EN BIT(1)
@@ -123,10 +127,15 @@
127#define SW_LNT2_HSTX_PRE_OE BIT(24)
128#define SW_LNT2_HSTX_OE BIT(25)
129
130struct mtk_mipitx_data {
131 const u32 mppll_preserve;
132};
133
134struct mtk_mipi_tx {
135 struct device *dev;
136 void __iomem *regs;
129 unsigned int data_rate;
137 u32 data_rate;
138 const struct mtk_mipitx_data *driver_data;
139 struct clk_hw pll_hw;
140 struct clk *pll;
141};
@@ -163,7 +172,7 @@ static void mtk_mipi_tx_update_bits(struct mtk_mipi_tx *mipi_tx, u32 offset,
172static int mtk_mipi_tx_pll_prepare(struct clk_hw *hw)
173{
174 struct mtk_mipi_tx *mipi_tx = mtk_mipi_tx_from_clk_hw(hw);
166 unsigned int txdiv, txdiv0, txdiv1;
175 u8 txdiv, txdiv0, txdiv1;
176 u64 pcw;
177
178 dev_dbg(mipi_tx->dev, "prepare: %u Hz\n", mipi_tx->data_rate);
@@ -243,6 +252,10 @@ static int mtk_mipi_tx_pll_prepare(struct clk_hw *hw)
252 mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_PLL_CON1,
253 RG_DSI_MPPLL_SDM_SSC_EN);
254
255 mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_PLL_TOP,
256 RG_DSI_MPPLL_PRESERVE,
257 mipi_tx->driver_data->mppll_preserve);
258
259 return 0;
260}
261
@@ -255,6 +268,9 @@ static void mtk_mipi_tx_pll_unprepare(struct clk_hw *hw)
268 mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_PLL_CON0,
269 RG_DSI_MPPLL_PLL_EN);
270
271 mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_PLL_TOP,
272 RG_DSI_MPPLL_PRESERVE, 0);
273
274 mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_PLL_PWR,
275 RG_DSI_MPPLL_SDM_ISO_EN |
276 RG_DSI_MPPLL_SDM_PWR_ON,
@@ -310,7 +326,7 @@ static const struct clk_ops mtk_mipi_tx_pll_ops = {
326static int mtk_mipi_tx_power_on_signal(struct phy *phy)
327{
328 struct mtk_mipi_tx *mipi_tx = phy_get_drvdata(phy);
313 unsigned int reg;
329 u32 reg;
330
331 for (reg = MIPITX_DSI_CLOCK_LANE;
332 reg <= MIPITX_DSI_DATA_LANE3; reg += 4)
@@ -341,7 +357,7 @@ static int mtk_mipi_tx_power_on(struct phy *phy)
357static void mtk_mipi_tx_power_off_signal(struct phy *phy)
358{
359 struct mtk_mipi_tx *mipi_tx = phy_get_drvdata(phy);
344 unsigned int reg;
360 u32 reg;
361
362 mtk_mipi_tx_set_bits(mipi_tx, MIPITX_DSI_TOP_CON,
363 RG_DSI_PAD_TIE_LOW_EN);
@@ -391,6 +407,7 @@ static int mtk_mipi_tx_probe(struct platform_device *pdev)
407 if (!mipi_tx)
408 return -ENOMEM;
409
410 mipi_tx->driver_data = of_device_get_match_data(dev);
411 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
412 mipi_tx->regs = devm_ioremap_resource(dev, mem);
413 if (IS_ERR(mipi_tx->regs)) {
@@ -448,8 +465,19 @@ static int mtk_mipi_tx_remove(struct platform_device *pdev)
465 return 0;
466}
467
468static const struct mtk_mipitx_data mt2701_mipitx_data = {
469 .mppll_preserve = (3 << 8)
470};
471
472static const struct mtk_mipitx_data mt8173_mipitx_data = {
473 .mppll_preserve = (0 << 8)
474};
475
476static const struct of_device_id mtk_mipi_tx_match[] = {
452 { .compatible = "mediatek,mt8173-mipi-tx", },
477 { .compatible = "mediatek,mt2701-mipi-tx",
478 .data = &mt2701_mipitx_data },
479 { .compatible = "mediatek,mt8173-mipi-tx",
480 .data = &mt8173_mipitx_data },
481 {},
482};
483
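The mt2701/mt8173 split above is the usual of_device_get_match_data() pattern: the compatible string selects a small per-SoC data struct, and the shared code paths only ever dereference driver_data. Supporting a further SoC would only need one more pair of entries; a purely hypothetical sketch:

/* Hypothetical: the compatible string and value are illustrative only. */
static const struct mtk_mipitx_data mtXXXX_mipitx_data = {
	.mppll_preserve = (1 << 8),
};

/* ...plus one more line in mtk_mipi_tx_match:
 * { .compatible = "mediatek,mtXXXX-mipi-tx", .data = &mtXXXX_mipitx_data },
 */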
diff --git a/drivers/gpu/drm/meson/Kconfig b/drivers/gpu/drm/meson/Kconfig
index 99719afcc77f..3ce51d8dfe1c 100644
--- a/drivers/gpu/drm/meson/Kconfig
+++ b/drivers/gpu/drm/meson/Kconfig
@@ -7,3 +7,9 @@ config DRM_MESON
7 select DRM_GEM_CMA_HELPER
8 select VIDEOMODE_HELPERS
9 select REGMAP_MMIO
10
11config DRM_MESON_DW_HDMI
12 tristate "HDMI Synopsys Controller support for Amlogic Meson Display"
13 depends on DRM_MESON
14 default y if DRM_MESON
15 select DRM_DW_HDMI
diff --git a/drivers/gpu/drm/meson/Makefile b/drivers/gpu/drm/meson/Makefile
index 92cf84530f49..c5c4cc362f02 100644
--- a/drivers/gpu/drm/meson/Makefile
+++ b/drivers/gpu/drm/meson/Makefile
@@ -2,3 +2,4 @@ meson-drm-y := meson_drv.o meson_plane.o meson_crtc.o meson_venc_cvbs.o
2meson-drm-y += meson_viu.o meson_vpp.o meson_venc.o meson_vclk.o meson_canvas.o
3
4obj-$(CONFIG_DRM_MESON) += meson-drm.o
5obj-$(CONFIG_DRM_MESON_DW_HDMI) += meson_dw_hdmi.o
diff --git a/drivers/gpu/drm/meson/meson_canvas.c b/drivers/gpu/drm/meson/meson_canvas.c
index 4109e36c297f..08f6073d967e 100644
--- a/drivers/gpu/drm/meson/meson_canvas.c
+++ b/drivers/gpu/drm/meson/meson_canvas.c
@@ -24,7 +24,9 @@
24#include "meson_canvas.h"
25#include "meson_registers.h"
26
27/**
28 * DOC: Canvas
29 *
30 * CANVAS is a memory zone where physical memory frame information
31 * is stored for the VIU to scanout.
32 */
diff --git a/drivers/gpu/drm/meson/meson_crtc.c b/drivers/gpu/drm/meson/meson_crtc.c
index 0fe49eccda65..c986eb03b9d9 100644
--- a/drivers/gpu/drm/meson/meson_crtc.c
+++ b/drivers/gpu/drm/meson/meson_crtc.c
@@ -82,11 +82,18 @@ static const struct drm_crtc_funcs meson_crtc_funcs = {
82static void meson_crtc_enable(struct drm_crtc *crtc)
83{
84 struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
85 struct drm_plane *plane = meson_crtc->priv->primary_plane;
85 struct drm_crtc_state *crtc_state = crtc->state;
86 struct meson_drm *priv = meson_crtc->priv;
87
88 DRM_DEBUG_DRIVER("\n");
89
90 if (!crtc_state) {
91 DRM_ERROR("Invalid crtc_state\n");
92 return;
93 }
94
95 /* Enable VPP Postblend */
89 writel(plane->state->crtc_w,
96 writel(crtc_state->mode.hdisplay,
97 priv->io_base + _REG(VPP_POSTBLEND_H_SIZE));
98
99 writel_bits_relaxed(VPP_POSTBLEND_ENABLE, VPP_POSTBLEND_ENABLE,
@@ -101,6 +108,7 @@ static void meson_crtc_disable(struct drm_crtc *crtc)
108 struct meson_drm *priv = meson_crtc->priv;
109
110 priv->viu.osd1_enabled = false;
111 priv->viu.osd1_commit = false;
112
113 /* Disable VPP Postblend */
114 writel_bits_relaxed(VPP_POSTBLEND_ENABLE, 0,
@@ -137,8 +145,7 @@ static void meson_crtc_atomic_flush(struct drm_crtc *crtc,
145 struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
146 struct meson_drm *priv = meson_crtc->priv;
147
140 if (priv->viu.osd1_enabled)
141 priv->viu.osd1_commit = true;
148 priv->viu.osd1_commit = true;
149}
150
151static const struct drm_crtc_helper_funcs meson_crtc_helper_funcs = {
diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
index bc562a07847b..75382f5f0fce 100644
--- a/drivers/gpu/drm/meson/meson_drv.c
+++ b/drivers/gpu/drm/meson/meson_drv.c
@@ -24,6 +24,7 @@
24#include <linux/module.h>
25#include <linux/mutex.h>
26#include <linux/platform_device.h>
27#include <linux/component.h>
28#include <linux/of_graph.h>
29
30#include <drm/drmP.h>
@@ -51,13 +52,14 @@
52#define DRIVER_NAME "meson"
53#define DRIVER_DESC "Amlogic Meson DRM driver"
54
55/**
56 * DOC: Video Processing Unit
57 *
58 * The VPU handles the global video processing; it includes management
59 * of the clock gates, block reset lines and power domains.
60 *
61 * What is missing :
62 *
63 * - Full reset of entire video processing HW blocks
64 * - Scaling and setup of the VPU clock
65 * - Bus clock gates
@@ -150,9 +152,9 @@ static struct regmap_config meson_regmap_config = {
152 .max_register = 0x1000,
153};
154
153static int meson_drv_probe(struct platform_device *pdev)
155static int meson_drv_bind(struct device *dev)
156{
155 struct device *dev = &pdev->dev;
157 struct platform_device *pdev = to_platform_device(dev);
158 struct meson_drm *priv;
159 struct drm_device *drm;
160 struct resource *res;
@@ -215,6 +217,15 @@ static int meson_drv_probe(struct platform_device *pdev)
217
218 drm_vblank_init(drm, 1);
219 drm_mode_config_init(drm);
220 drm->mode_config.max_width = 3840;
221 drm->mode_config.max_height = 2160;
222 drm->mode_config.funcs = &meson_mode_config_funcs;
223
224 /* Hardware Initialization */
225
226 meson_venc_init(priv);
227 meson_vpp_init(priv);
228 meson_viu_init(priv);
229
230 /* Encoder Initialization */
231
@@ -222,11 +233,11 @@ static int meson_drv_probe(struct platform_device *pdev)
233 if (ret)
234 goto free_drm;
235
225 /* Hardware Initialization */
226
227 meson_venc_init(priv);
228 meson_vpp_init(priv);
229 meson_viu_init(priv);
236 ret = component_bind_all(drm->dev, drm);
237 if (ret) {
238 dev_err(drm->dev, "Couldn't bind all components\n");
239 goto free_drm;
240 }
241
242 ret = meson_plane_create(priv);
243 if (ret)
@@ -241,9 +252,6 @@ static int meson_drv_probe(struct platform_device *pdev)
252 goto free_drm;
253
254 drm_mode_config_reset(drm);
244 drm->mode_config.max_width = 8192;
245 drm->mode_config.max_height = 8192;
246 drm->mode_config.funcs = &meson_mode_config_funcs;
255
256 priv->fbdev = drm_fbdev_cma_init(drm, 32,
257 drm->mode_config.num_connector);
@@ -268,9 +276,9 @@ free_drm:
276 return ret;
277}
278
271static int meson_drv_remove(struct platform_device *pdev)
279static void meson_drv_unbind(struct device *dev)
280{
273 struct drm_device *drm = dev_get_drvdata(&pdev->dev);
281 struct drm_device *drm = dev_get_drvdata(dev);
282 struct meson_drm *priv = drm->dev_private;
283
284 drm_dev_unregister(drm);
282 290
283 return 0;
284}
291}
292
293static const struct component_master_ops meson_drv_master_ops = {
294 .bind = meson_drv_bind,
295 .unbind = meson_drv_unbind,
296};
297
298static int compare_of(struct device *dev, void *data)
299{
300 DRM_DEBUG_DRIVER("Comparing of node %s with %s\n",
301 of_node_full_name(dev->of_node),
302 of_node_full_name(data));
303
304 return dev->of_node == data;
305}
306
307/* Possible connectors nodes to ignore */
308static const struct of_device_id connectors_match[] = {
309 { .compatible = "composite-video-connector" },
310 { .compatible = "svideo-connector" },
311 { .compatible = "hdmi-connector" },
312 { .compatible = "dvi-connector" },
313 {}
314};
315
316static int meson_probe_remote(struct platform_device *pdev,
317 struct component_match **match,
318 struct device_node *parent,
319 struct device_node *remote)
320{
321 struct device_node *ep, *remote_node;
322 int count = 1;
323
324 /* If node is a connector, return and do not add to match table */
325 if (of_match_node(connectors_match, remote))
326 return 1;
327
328 component_match_add(&pdev->dev, match, compare_of, remote);
329
330 for_each_endpoint_of_node(remote, ep) {
331 remote_node = of_graph_get_remote_port_parent(ep);
332 if (!remote_node ||
333 remote_node == parent || /* Ignore parent endpoint */
334 !of_device_is_available(remote_node))
335 continue;
336
337 count += meson_probe_remote(pdev, match, remote, remote_node);
338
339 of_node_put(remote_node);
340 }
341
342 return count;
343}
344
345static int meson_drv_probe(struct platform_device *pdev)
346{
347 struct component_match *match = NULL;
348 struct device_node *np = pdev->dev.of_node;
349 struct device_node *ep, *remote;
350 int count = 0;
351
352 for_each_endpoint_of_node(np, ep) {
353 remote = of_graph_get_remote_port_parent(ep);
354 if (!remote || !of_device_is_available(remote))
355 continue;
356
357 count += meson_probe_remote(pdev, &match, np, remote);
358 }
359
360 /* If some endpoints were found, initialize the nodes */
361 if (count) {
362 dev_info(&pdev->dev, "Queued %d outputs on vpu\n", count);
363
364 return component_master_add_with_match(&pdev->dev,
365 &meson_drv_master_ops,
366 match);
367 }
368
369 /* If no output endpoints were available, simply bail out */
370 return 0;
371};
372
373static const struct of_device_id dt_match[] = {
374 { .compatible = "amlogic,meson-gxbb-vpu" },
375 { .compatible = "amlogic,meson-gxl-vpu" },
@@ -293,7 +380,6 @@ MODULE_DEVICE_TABLE(of, dt_match);
380
381static struct platform_driver meson_drm_platform_driver = {
382 .probe = meson_drv_probe,
296 .remove = meson_drv_remove,
383 .driver = {
384 .name = "meson-drm",
385 .of_match_table = dt_match,
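The probe/bind split follows the component framework contract: meson_drv_probe() only walks the OF graph and queues remotes, while the expensive DRM setup waits in meson_drv_bind() until every queued component has probed. A minimal sketch of the handshake; example_queue_remote is a placeholder name, compare_of() is the driver's own matcher shown above:

#include <linux/component.h>

/* Sketch: the master queues each remote DT node; the framework then
 * defers meson_drv_bind() until a device whose of_node matches has
 * called component_add(), as the new meson_dw_hdmi driver below does
 * from its probe(). */
static void example_queue_remote(struct platform_device *pdev,
				 struct component_match **match,
				 struct device_node *remote)
{
	component_match_add(&pdev->dev, match, compare_of, remote);
}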
diff --git a/drivers/gpu/drm/meson/meson_drv.h b/drivers/gpu/drm/meson/meson_drv.h
index 6195327c51ca..5e8b392b9d1f 100644
--- a/drivers/gpu/drm/meson/meson_drv.h
+++ b/drivers/gpu/drm/meson/meson_drv.h
@@ -47,6 +47,9 @@ struct meson_drm {
47 47
48 struct { 48 struct {
49 unsigned int current_mode; 49 unsigned int current_mode;
50 bool hdmi_repeat;
51 bool venc_repeat;
52 bool hdmi_use_enci;
50 } venc; 53 } venc;
51}; 54};
52 55
diff --git a/drivers/gpu/drm/meson/meson_dw_hdmi.c b/drivers/gpu/drm/meson/meson_dw_hdmi.c
new file mode 100644
index 000000000000..7b86eb7776b3
--- /dev/null
+++ b/drivers/gpu/drm/meson/meson_dw_hdmi.c
@@ -0,0 +1,919 @@
1/*
2 * Copyright (C) 2016 BayLibre, SAS
3 * Author: Neil Armstrong <narmstrong@baylibre.com>
4 * Copyright (C) 2015 Amlogic, Inc. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License as
8 * published by the Free Software Foundation; either version 2 of the
9 * License, or (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20#include <linux/kernel.h>
21#include <linux/module.h>
22#include <linux/component.h>
23#include <linux/of_graph.h>
24#include <linux/reset.h>
25#include <linux/clk.h>
26
27#include <drm/drmP.h>
28#include <drm/drm_edid.h>
29#include <drm/drm_crtc_helper.h>
30#include <drm/drm_atomic_helper.h>
31#include <drm/bridge/dw_hdmi.h>
32
33#include <uapi/linux/media-bus-format.h>
34#include <uapi/linux/videodev2.h>
35
36#include "meson_drv.h"
37#include "meson_venc.h"
38#include "meson_vclk.h"
39#include "meson_dw_hdmi.h"
40#include "meson_registers.h"
41
42#define DRIVER_NAME "meson-dw-hdmi"
43#define DRIVER_DESC "Amlogic Meson HDMI-TX DRM driver"
44
45/**
46 * DOC: HDMI Output
47 *
48 * HDMI Output is composed of :
49 *
50 * - A Synopsys DesignWare HDMI Controller IP
51 * - A TOP control block controlling the Clocks and PHY
52 * - A custom HDMI PHY in order to convert video to a TMDS signal
53 *
54 * .. code::
55 *
56 * ___________________________________
57 * | HDMI TOP |<= HPD
58 * |___________________________________|
59 * | | |
60 * | Synopsys HDMI | HDMI PHY |=> TMDS
61 * | Controller |________________|
62 * |___________________________________|<=> DDC
63 *
64 *
65 * The HDMI TOP block only supports HPD sensing.
66 * The Synopsys HDMI Controller interrupt is routed
67 * through the TOP Block interrupt.
68 * Communication to the TOP Block and the Synopsys
69 * HDMI Controller is done via a pair of addr+read/write
70 * registers.
71 * The HDMI PHY is configured by registers in the
72 * HHI register block.
73 *
74 * Pixel data arrives in 4:4:4 format from the VENC
75 * block and the VPU HDMI mux selects either the ENCI
76 * encoder for the 576i or 480i formats or the ENCP
77 * encoder for all the other formats including
78 * interlaced HD formats.
79 * The VENC uses a DVI encoder on top of the ENCI
80 * or ENCP encoders to generate DVI timings for the
81 * HDMI controller.
82 *
83 * GXBB, GXL and GXM embed the Synopsys DesignWare
84 * HDMI TX IP version 2.01a with HDCP and I2C & S/PDIF
85 * audio source interfaces.
86 *
87 * We handle the following features :
88 *
89 * - HPD Rise & Fall interrupt
90 * - HDMI Controller Interrupt
91 * - HDMI PHY Init for 480i to 1080p60
92 * - VENC & HDMI Clock setup for 480i to 1080p60
93 * - VENC Mode setup for 480i to 1080p60
94 *
95 * What is missing :
96 *
97 * - PHY, Clock and Mode setup for 2k and 4k modes
98 * - SDDC Scrambling mode for HDMI 2.0a
99 * - HDCP Setup
100 * - CEC Management
101 */
102
103/* TOP Block Communication Channel */
104#define HDMITX_TOP_ADDR_REG 0x0
105#define HDMITX_TOP_DATA_REG 0x4
106#define HDMITX_TOP_CTRL_REG 0x8
107
108/* Controller Communication Channel */
109#define HDMITX_DWC_ADDR_REG 0x10
110#define HDMITX_DWC_DATA_REG 0x14
111#define HDMITX_DWC_CTRL_REG 0x18
112
113/* HHI Registers */
114#define HHI_MEM_PD_REG0 0x100 /* 0x40 */
115#define HHI_HDMI_CLK_CNTL 0x1cc /* 0x73 */
116#define HHI_HDMI_PHY_CNTL0 0x3a0 /* 0xe8 */
117#define HHI_HDMI_PHY_CNTL1 0x3a4 /* 0xe9 */
118#define HHI_HDMI_PHY_CNTL2 0x3a8 /* 0xea */
119#define HHI_HDMI_PHY_CNTL3 0x3ac /* 0xeb */
120
121static DEFINE_SPINLOCK(reg_lock);
122
123enum meson_venc_source {
124 MESON_VENC_SOURCE_NONE = 0,
125 MESON_VENC_SOURCE_ENCI = 1,
126 MESON_VENC_SOURCE_ENCP = 2,
127};
128
129struct meson_dw_hdmi {
130 struct drm_encoder encoder;
131 struct dw_hdmi_plat_data dw_plat_data;
132 struct meson_drm *priv;
133 struct device *dev;
134 void __iomem *hdmitx;
135 struct reset_control *hdmitx_apb;
136 struct reset_control *hdmitx_ctrl;
137 struct reset_control *hdmitx_phy;
138 struct clk *hdmi_pclk;
139 struct clk *venci_clk;
140 u32 irq_stat;
141};
142#define encoder_to_meson_dw_hdmi(x) \
143 container_of(x, struct meson_dw_hdmi, encoder)
144
145static inline int dw_hdmi_is_compatible(struct meson_dw_hdmi *dw_hdmi,
146 const char *compat)
147{
148 return of_device_is_compatible(dw_hdmi->dev->of_node, compat);
149}
150
151/* PHY (via TOP bridge) and Controller dedicated register interface */
152
153static unsigned int dw_hdmi_top_read(struct meson_dw_hdmi *dw_hdmi,
154 unsigned int addr)
155{
156 unsigned long flags;
157 unsigned int data;
158
159 spin_lock_irqsave(&reg_lock, flags);
160
161 /* ADDR must be written twice */
162 writel(addr & 0xffff, dw_hdmi->hdmitx + HDMITX_TOP_ADDR_REG);
163 writel(addr & 0xffff, dw_hdmi->hdmitx + HDMITX_TOP_ADDR_REG);
164
165 /* Read needs a second DATA read */
166 data = readl(dw_hdmi->hdmitx + HDMITX_TOP_DATA_REG);
167 data = readl(dw_hdmi->hdmitx + HDMITX_TOP_DATA_REG);
168
169 spin_unlock_irqrestore(&reg_lock, flags);
170
171 return data;
172}
173
174static inline void dw_hdmi_top_write(struct meson_dw_hdmi *dw_hdmi,
175 unsigned int addr, unsigned int data)
176{
177 unsigned long flags;
178
179 spin_lock_irqsave(&reg_lock, flags);
180
181 /* ADDR must be written twice */
182 writel(addr & 0xffff, dw_hdmi->hdmitx + HDMITX_TOP_ADDR_REG);
183 writel(addr & 0xffff, dw_hdmi->hdmitx + HDMITX_TOP_ADDR_REG);
184
185 /* Write needs single DATA write */
186 writel(data, dw_hdmi->hdmitx + HDMITX_TOP_DATA_REG);
187
188 spin_unlock_irqrestore(&reg_lock, flags);
189}
190
191/* Helper to change specific bits in PHY registers */
192static inline void dw_hdmi_top_write_bits(struct meson_dw_hdmi *dw_hdmi,
193 unsigned int addr,
194 unsigned int mask,
195 unsigned int val)
196{
197 unsigned int data = dw_hdmi_top_read(dw_hdmi, addr);
198
199 data &= ~mask;
200 data |= val;
201
202 dw_hdmi_top_write(dw_hdmi, addr, data);
203}
204
205static unsigned int dw_hdmi_dwc_read(struct meson_dw_hdmi *dw_hdmi,
206 unsigned int addr)
207{
208 unsigned long flags;
209 unsigned int data;
210
211 spin_lock_irqsave(&reg_lock, flags);
212
213 /* ADDR must be written twice */
214 writel(addr & 0xffff, dw_hdmi->hdmitx + HDMITX_DWC_ADDR_REG);
215 writel(addr & 0xffff, dw_hdmi->hdmitx + HDMITX_DWC_ADDR_REG);
216
217 /* Read needs a second DATA read */
218 data = readl(dw_hdmi->hdmitx + HDMITX_DWC_DATA_REG);
219 data = readl(dw_hdmi->hdmitx + HDMITX_DWC_DATA_REG);
220
221 spin_unlock_irqrestore(&reg_lock, flags);
222
223 return data;
224}
225
226static inline void dw_hdmi_dwc_write(struct meson_dw_hdmi *dw_hdmi,
227 unsigned int addr, unsigned int data)
228{
229 unsigned long flags;
230
231 spin_lock_irqsave(&reg_lock, flags);
232
233 /* ADDR must be written twice */
234 writel(addr & 0xffff, dw_hdmi->hdmitx + HDMITX_DWC_ADDR_REG);
235 writel(addr & 0xffff, dw_hdmi->hdmitx + HDMITX_DWC_ADDR_REG);
236
237 /* Write needs single DATA write */
238 writel(data, dw_hdmi->hdmitx + HDMITX_DWC_DATA_REG);
239
240 spin_unlock_irqrestore(&reg_lock, flags);
241}
242
243/* Helper to change specific bits in controller registers */
244static inline void dw_hdmi_dwc_write_bits(struct meson_dw_hdmi *dw_hdmi,
245 unsigned int addr,
246 unsigned int mask,
247 unsigned int val)
248{
249 unsigned int data = dw_hdmi_dwc_read(dw_hdmi, addr);
250
251 data &= ~mask;
252 data |= val;
253
254 dw_hdmi_dwc_write(dw_hdmi, addr, data);
255}
256
257/* Bridge */
258
259/* Setup PHY bandwidth modes */
260static void meson_hdmi_phy_setup_mode(struct meson_dw_hdmi *dw_hdmi,
261 struct drm_display_mode *mode)
262{
263 struct meson_drm *priv = dw_hdmi->priv;
264 unsigned int pixel_clock = mode->clock;
265
266 if (dw_hdmi_is_compatible(dw_hdmi, "amlogic,meson-gxl-dw-hdmi") ||
267 dw_hdmi_is_compatible(dw_hdmi, "amlogic,meson-gxm-dw-hdmi")) {
268 if (pixel_clock >= 371250) {
269 /* 5.94Gbps, 3.7125Gbps */
270 regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL0, 0x333d3282);
271 regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL3, 0x2136315b);
272 } else if (pixel_clock >= 297000) {
273 /* 2.97Gbps */
274 regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL0, 0x33303382);
275 regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL3, 0x2036315b);
276 } else if (pixel_clock >= 148500) {
277 /* 1.485Gbps */
278 regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL0, 0x33303362);
279 regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL3, 0x2016315b);
280 } else {
281 /* 742.5Mbps, and below */
282 regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL0, 0x33604142);
283 regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL3, 0x0016315b);
284 }
285 } else if (dw_hdmi_is_compatible(dw_hdmi,
286 "amlogic,meson-gxbb-dw-hdmi")) {
287 if (pixel_clock >= 371250) {
288 /* 5.94Gbps, 3.7125Gbps */
289 regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL0, 0x33353245);
290 regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL3, 0x2100115b);
291 } else if (pixel_clock >= 297000) {
292 /* 2.97Gbps */
293 regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL0, 0x33634283);
294 regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL3, 0xb000115b);
295 } else {
296 /* 1.485Gbps, and below */
297 regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL0, 0x33632122);
298 regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL3, 0x2000115b);
299 }
300 }
301}
302
303static inline void dw_hdmi_phy_reset(struct meson_dw_hdmi *dw_hdmi)
304{
305 struct meson_drm *priv = dw_hdmi->priv;
306
307 /* Enable and software reset */
308 regmap_update_bits(priv->hhi, HHI_HDMI_PHY_CNTL1, 0xf, 0xf);
309
310 mdelay(2);
311
312 /* Enable and unreset */
313 regmap_update_bits(priv->hhi, HHI_HDMI_PHY_CNTL1, 0xf, 0xe);
314
315 mdelay(2);
316}
317
318static void dw_hdmi_set_vclk(struct meson_dw_hdmi *dw_hdmi,
319 struct drm_display_mode *mode)
320{
321 struct meson_drm *priv = dw_hdmi->priv;
322 int vic = drm_match_cea_mode(mode);
323 unsigned int vclk_freq;
324 unsigned int venc_freq;
325 unsigned int hdmi_freq;
326
327 vclk_freq = mode->clock;
328
329 if (mode->flags & DRM_MODE_FLAG_DBLCLK)
330 vclk_freq *= 2;
331
332 venc_freq = vclk_freq;
333 hdmi_freq = vclk_freq;
334
335 if (meson_venc_hdmi_venc_repeat(vic))
336 venc_freq *= 2;
337
338 vclk_freq = max(venc_freq, hdmi_freq);
339
340 if (mode->flags & DRM_MODE_FLAG_DBLCLK)
341 venc_freq /= 2;
342
343 DRM_DEBUG_DRIVER("vclk:%d venc=%d hdmi=%d enci=%d\n",
344 vclk_freq, venc_freq, hdmi_freq,
345 priv->venc.hdmi_use_enci);
346
347 meson_vclk_setup(priv, MESON_VCLK_TARGET_HDMI, vclk_freq,
348 venc_freq, hdmi_freq, priv->venc.hdmi_use_enci);
349}
350
351static int dw_hdmi_phy_init(struct dw_hdmi *hdmi, void *data,
352 struct drm_display_mode *mode)
353{
354 struct meson_dw_hdmi *dw_hdmi = (struct meson_dw_hdmi *)data;
355 struct meson_drm *priv = dw_hdmi->priv;
356 unsigned int wr_clk =
357 readl_relaxed(priv->io_base + _REG(VPU_HDMI_SETTING));
358
359 DRM_DEBUG_DRIVER("%d:\"%s\"\n", mode->base.id, mode->name);
360
361 /* Enable clocks */
362 regmap_update_bits(priv->hhi, HHI_HDMI_CLK_CNTL, 0xffff, 0x100);
363
364 /* Bring HDMITX MEM output out of power down */
365 regmap_update_bits(priv->hhi, HHI_MEM_PD_REG0, 0xff << 8, 0);
366
367 /* Bring out of reset */
368 dw_hdmi_top_write(dw_hdmi, HDMITX_TOP_SW_RESET, 0);
369
370 /* Enable internal pixclk, tmds_clk, spdif_clk, i2s_clk, cecclk */
371 dw_hdmi_top_write_bits(dw_hdmi, HDMITX_TOP_CLK_CNTL,
372 0x3, 0x3);
373 dw_hdmi_top_write_bits(dw_hdmi, HDMITX_TOP_CLK_CNTL,
374 0x3 << 4, 0x3 << 4);
375
376 /* Enable normal output to PHY */
377 dw_hdmi_top_write(dw_hdmi, HDMITX_TOP_BIST_CNTL, BIT(12));
378
379 /* TMDS pattern setup (TOFIX pattern for 4k2k scrambling) */
380 dw_hdmi_top_write(dw_hdmi, HDMITX_TOP_TMDS_CLK_PTTN_01, 0x001f001f);
381 dw_hdmi_top_write(dw_hdmi, HDMITX_TOP_TMDS_CLK_PTTN_23, 0x001f001f);
382
383 /* Load TMDS pattern */
384 dw_hdmi_top_write(dw_hdmi, HDMITX_TOP_TMDS_CLK_PTTN_CNTL, 0x1);
385 msleep(20);
386 dw_hdmi_top_write(dw_hdmi, HDMITX_TOP_TMDS_CLK_PTTN_CNTL, 0x2);
387
388 /* Setup PHY parameters */
389 meson_hdmi_phy_setup_mode(dw_hdmi, mode);
390
391 /* Setup PHY */
392 regmap_update_bits(priv->hhi, HHI_HDMI_PHY_CNTL1,
393 0xffff << 16, 0x0390 << 16);
394
395 /* BIT_INVERT */
396 if (dw_hdmi_is_compatible(dw_hdmi, "amlogic,meson-gxl-dw-hdmi") ||
397 dw_hdmi_is_compatible(dw_hdmi, "amlogic,meson-gxm-dw-hdmi"))
398 regmap_update_bits(priv->hhi, HHI_HDMI_PHY_CNTL1,
399 BIT(17), 0);
400 else
401 regmap_update_bits(priv->hhi, HHI_HDMI_PHY_CNTL1,
402 BIT(17), BIT(17));
403
404 /* Disable clock, fifo, fifo_wr */
405 regmap_update_bits(priv->hhi, HHI_HDMI_PHY_CNTL1, 0xf, 0);
406
407 msleep(100);
408
409 /* Reset PHY 3 times in a row */
410 dw_hdmi_phy_reset(dw_hdmi);
411 dw_hdmi_phy_reset(dw_hdmi);
412 dw_hdmi_phy_reset(dw_hdmi);
413
414 /* Temporarily disable VENC video stream */
415 if (priv->venc.hdmi_use_enci)
416 writel_relaxed(0, priv->io_base + _REG(ENCI_VIDEO_EN));
417 else
418 writel_relaxed(0, priv->io_base + _REG(ENCP_VIDEO_EN));
419
420 /* Temporarily disable HDMI video stream to HDMI-TX */
421 writel_bits_relaxed(0x3, 0,
422 priv->io_base + _REG(VPU_HDMI_SETTING));
423 writel_bits_relaxed(0xf << 8, 0,
424 priv->io_base + _REG(VPU_HDMI_SETTING));
425
426 /* Re-Enable VENC video stream */
427 if (priv->venc.hdmi_use_enci)
428 writel_relaxed(1, priv->io_base + _REG(ENCI_VIDEO_EN));
429 else
430 writel_relaxed(1, priv->io_base + _REG(ENCP_VIDEO_EN));
431
432 /* Push back HDMI clock settings */
433 writel_bits_relaxed(0xf << 8, wr_clk & (0xf << 8),
434 priv->io_base + _REG(VPU_HDMI_SETTING));
435
436 /* Enable and Select HDMI video source for HDMI-TX */
437 if (priv->venc.hdmi_use_enci)
438 writel_bits_relaxed(0x3, MESON_VENC_SOURCE_ENCI,
439 priv->io_base + _REG(VPU_HDMI_SETTING));
440 else
441 writel_bits_relaxed(0x3, MESON_VENC_SOURCE_ENCP,
442 priv->io_base + _REG(VPU_HDMI_SETTING));
443
444 return 0;
445}
446
447static void dw_hdmi_phy_disable(struct dw_hdmi *hdmi,
448 void *data)
449{
450 struct meson_dw_hdmi *dw_hdmi = (struct meson_dw_hdmi *)data;
451 struct meson_drm *priv = dw_hdmi->priv;
452
453 DRM_DEBUG_DRIVER("\n");
454
455 regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL0, 0);
456}
457
458static enum drm_connector_status dw_hdmi_read_hpd(struct dw_hdmi *hdmi,
459 void *data)
460{
461 struct meson_dw_hdmi *dw_hdmi = (struct meson_dw_hdmi *)data;
462
463 return !!dw_hdmi_top_read(dw_hdmi, HDMITX_TOP_STAT0) ?
464 connector_status_connected : connector_status_disconnected;
465}
466
467static void dw_hdmi_setup_hpd(struct dw_hdmi *hdmi,
468 void *data)
469{
470 struct meson_dw_hdmi *dw_hdmi = (struct meson_dw_hdmi *)data;
471
472 /* Setup HPD Filter */
473 dw_hdmi_top_write(dw_hdmi, HDMITX_TOP_HPD_FILTER,
474 (0xa << 12) | 0xa0);
475
476 /* Clear interrupts */
477 dw_hdmi_top_write(dw_hdmi, HDMITX_TOP_INTR_STAT_CLR,
478 HDMITX_TOP_INTR_HPD_RISE | HDMITX_TOP_INTR_HPD_FALL);
479
480 /* Unmask interrupts */
481 dw_hdmi_top_write_bits(dw_hdmi, HDMITX_TOP_INTR_MASKN,
482 HDMITX_TOP_INTR_HPD_RISE | HDMITX_TOP_INTR_HPD_FALL,
483 HDMITX_TOP_INTR_HPD_RISE | HDMITX_TOP_INTR_HPD_FALL);
484}
485
486static const struct dw_hdmi_phy_ops meson_dw_hdmi_phy_ops = {
487 .init = dw_hdmi_phy_init,
488 .disable = dw_hdmi_phy_disable,
489 .read_hpd = dw_hdmi_read_hpd,
490 .setup_hpd = dw_hdmi_setup_hpd,
491};
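/* These ops are handed to the Synopsys bridge core through
 * dw_plat_data->phy_ops in meson_dw_hdmi_bind() below, so the core can
 * drive the Meson PHY glue (roughly: .init on mode set, .setup_hpd once
 * at bind) without knowing anything about the TOP/HHI registers. */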
492
493static irqreturn_t dw_hdmi_top_irq(int irq, void *dev_id)
494{
495 struct meson_dw_hdmi *dw_hdmi = dev_id;
496 u32 stat;
497
498 stat = dw_hdmi_top_read(dw_hdmi, HDMITX_TOP_INTR_STAT);
499 dw_hdmi_top_write(dw_hdmi, HDMITX_TOP_INTR_STAT_CLR, stat);
500
501 /* HPD Events, handle in the threaded interrupt handler */
502 if (stat & (HDMITX_TOP_INTR_HPD_RISE | HDMITX_TOP_INTR_HPD_FALL)) {
503 dw_hdmi->irq_stat = stat;
504 return IRQ_WAKE_THREAD;
505 }
506
507 /* HDMI Controller Interrupt */
508 if (stat & 1)
509 return IRQ_NONE;
510
511 /* TOFIX Handle HDCP Interrupts */
512
513 return IRQ_HANDLED;
514}
515
516/* Threaded interrupt handler to manage HPD events */
517static irqreturn_t dw_hdmi_top_thread_irq(int irq, void *dev_id)
518{
519 struct meson_dw_hdmi *dw_hdmi = dev_id;
520 u32 stat = dw_hdmi->irq_stat;
521
522 /* HPD Events */
523 if (stat & (HDMITX_TOP_INTR_HPD_RISE | HDMITX_TOP_INTR_HPD_FALL)) {
524 bool hpd_connected = false;
525
526 if (stat & HDMITX_TOP_INTR_HPD_RISE)
527 hpd_connected = true;
528
529 dw_hdmi_setup_rx_sense(dw_hdmi->dev, hpd_connected,
530 hpd_connected);
531
532 drm_helper_hpd_irq_event(dw_hdmi->encoder.dev);
533 }
534
535 return IRQ_HANDLED;
536}
537
538/* TOFIX Enable support for non-vic modes */
539static enum drm_mode_status dw_hdmi_mode_valid(struct drm_connector *connector,
540 struct drm_display_mode *mode)
541{
542 unsigned int vclk_freq;
543 unsigned int venc_freq;
544 unsigned int hdmi_freq;
545 int vic = drm_match_cea_mode(mode);
546
547 DRM_DEBUG_DRIVER("Modeline %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x\n",
548 mode->base.id, mode->name, mode->vrefresh, mode->clock,
549 mode->hdisplay, mode->hsync_start,
550 mode->hsync_end, mode->htotal,
551 mode->vdisplay, mode->vsync_start,
552 mode->vsync_end, mode->vtotal, mode->type, mode->flags);
553
554 /* For now, only accept VIC modes */
555 if (!vic)
556 return MODE_BAD;
557
558 /* For now, filter by supported VIC modes */
559 if (!meson_venc_hdmi_supported_vic(vic))
560 return MODE_BAD;
561
562 vclk_freq = mode->clock;
563
564 /* 480i/576i needs global pixel doubling */
565 if (mode->flags & DRM_MODE_FLAG_DBLCLK)
566 vclk_freq *= 2;
567
568 venc_freq = vclk_freq;
569 hdmi_freq = vclk_freq;
570
571 /* VENC double pixels for 1080i and 720p modes */
572 if (meson_venc_hdmi_venc_repeat(vic))
573 venc_freq *= 2;
574
575 vclk_freq = max(venc_freq, hdmi_freq);
576
577 if (mode->flags & DRM_MODE_FLAG_DBLCLK)
578 venc_freq /= 2;
579
580 dev_dbg(connector->dev->dev, "%s: vclk:%d venc=%d hdmi=%d\n", __func__,
581 vclk_freq, venc_freq, hdmi_freq);
582
583 /* Finally filter by configurable vclk frequencies */
584 switch (vclk_freq) {
585 case 54000:
586 case 74250:
587 case 148500:
588 case 297000:
589 case 594000:
590 return MODE_OK;
591 }
592
593 return MODE_CLOCK_RANGE;
594}
595
596/* Encoder */
597
598static void meson_venc_hdmi_encoder_destroy(struct drm_encoder *encoder)
599{
600 drm_encoder_cleanup(encoder);
601}
602
603static const struct drm_encoder_funcs meson_venc_hdmi_encoder_funcs = {
604 .destroy = meson_venc_hdmi_encoder_destroy,
605};
606
607static int meson_venc_hdmi_encoder_atomic_check(struct drm_encoder *encoder,
608 struct drm_crtc_state *crtc_state,
609 struct drm_connector_state *conn_state)
610{
611 return 0;
612}
613
614static void meson_venc_hdmi_encoder_disable(struct drm_encoder *encoder)
615{
616 struct meson_dw_hdmi *dw_hdmi = encoder_to_meson_dw_hdmi(encoder);
617 struct meson_drm *priv = dw_hdmi->priv;
618
619 DRM_DEBUG_DRIVER("\n");
620
621 writel_bits_relaxed(0x3, 0,
622 priv->io_base + _REG(VPU_HDMI_SETTING));
623
624 writel_relaxed(0, priv->io_base + _REG(ENCI_VIDEO_EN));
625 writel_relaxed(0, priv->io_base + _REG(ENCP_VIDEO_EN));
626}
627
628static void meson_venc_hdmi_encoder_enable(struct drm_encoder *encoder)
629{
630 struct meson_dw_hdmi *dw_hdmi = encoder_to_meson_dw_hdmi(encoder);
631 struct meson_drm *priv = dw_hdmi->priv;
632
633 DRM_DEBUG_DRIVER("%s\n", priv->venc.hdmi_use_enci ? "VENCI" : "VENCP");
634
635 if (priv->venc.hdmi_use_enci)
636 writel_relaxed(1, priv->io_base + _REG(ENCI_VIDEO_EN));
637 else
638 writel_relaxed(1, priv->io_base + _REG(ENCP_VIDEO_EN));
639}
640
641static void meson_venc_hdmi_encoder_mode_set(struct drm_encoder *encoder,
642 struct drm_display_mode *mode,
643 struct drm_display_mode *adjusted_mode)
644{
645 struct meson_dw_hdmi *dw_hdmi = encoder_to_meson_dw_hdmi(encoder);
646 struct meson_drm *priv = dw_hdmi->priv;
647 int vic = drm_match_cea_mode(mode);
648
649 DRM_DEBUG_DRIVER("%d:\"%s\" vic %d\n",
650 mode->base.id, mode->name, vic);
651
652 /* Should have been filtered */
653 if (!vic)
654 return;
655
656 /* VENC + VENC-DVI Mode setup */
657 meson_venc_hdmi_mode_set(priv, vic, mode);
658
659 /* VCLK Set clock */
660 dw_hdmi_set_vclk(dw_hdmi, mode);
661
662 /* Setup YUV444 to HDMI-TX, no 10bit dithering */
663 writel_relaxed(0, priv->io_base + _REG(VPU_HDMI_FMT_CTRL));
664}
665
666static const struct drm_encoder_helper_funcs
667 meson_venc_hdmi_encoder_helper_funcs = {
668 .atomic_check = meson_venc_hdmi_encoder_atomic_check,
669 .disable = meson_venc_hdmi_encoder_disable,
670 .enable = meson_venc_hdmi_encoder_enable,
671 .mode_set = meson_venc_hdmi_encoder_mode_set,
672};
673
674/* DW HDMI Regmap */
675
676static int meson_dw_hdmi_reg_read(void *context, unsigned int reg,
677 unsigned int *result)
678{
679 *result = dw_hdmi_dwc_read(context, reg);
680
681 return 0;
682
683}
684
685static int meson_dw_hdmi_reg_write(void *context, unsigned int reg,
686 unsigned int val)
687{
688 dw_hdmi_dwc_write(context, reg, val);
689
690 return 0;
691}
692
693static const struct regmap_config meson_dw_hdmi_regmap_config = {
694 .reg_bits = 32,
695 .val_bits = 8,
696 .reg_read = meson_dw_hdmi_reg_read,
697 .reg_write = meson_dw_hdmi_reg_write,
698 .max_register = 0x10000,
699};
700
701static bool meson_hdmi_connector_is_available(struct device *dev)
702{
703 struct device_node *ep, *remote;
704
705 /* HDMI Connector is on the second port, first endpoint */
706 ep = of_graph_get_endpoint_by_regs(dev->of_node, 1, 0);
707 if (!ep)
708 return false;
709
710 /* If the endpoint node exists, consider it enabled */
711 remote = of_graph_get_remote_port(ep);
712 if (remote) {
713 of_node_put(ep);
714 return true;
715 }
716
717 of_node_put(ep);
718 of_node_put(remote);
719
720 return false;
721}
722
723static int meson_dw_hdmi_bind(struct device *dev, struct device *master,
724 void *data)
725{
726 struct platform_device *pdev = to_platform_device(dev);
727 struct meson_dw_hdmi *meson_dw_hdmi;
728 struct drm_device *drm = data;
729 struct meson_drm *priv = drm->dev_private;
730 struct dw_hdmi_plat_data *dw_plat_data;
731 struct drm_encoder *encoder;
732 struct resource *res;
733 int irq;
734 int ret;
735
736 DRM_DEBUG_DRIVER("\n");
737
738 if (!meson_hdmi_connector_is_available(dev)) {
739 dev_info(drm->dev, "HDMI Output connector not available\n");
740 return -ENODEV;
741 }
742
743 meson_dw_hdmi = devm_kzalloc(dev, sizeof(*meson_dw_hdmi),
744 GFP_KERNEL);
745 if (!meson_dw_hdmi)
746 return -ENOMEM;
747
748 meson_dw_hdmi->priv = priv;
749 meson_dw_hdmi->dev = dev;
750 dw_plat_data = &meson_dw_hdmi->dw_plat_data;
751 encoder = &meson_dw_hdmi->encoder;
752
753 meson_dw_hdmi->hdmitx_apb = devm_reset_control_get_exclusive(dev,
754 "hdmitx_apb");
755 if (IS_ERR(meson_dw_hdmi->hdmitx_apb)) {
756 dev_err(dev, "Failed to get hdmitx_apb reset\n");
757 return PTR_ERR(meson_dw_hdmi->hdmitx_apb);
758 }
759
760 meson_dw_hdmi->hdmitx_ctrl = devm_reset_control_get_exclusive(dev,
761 "hdmitx");
762 if (IS_ERR(meson_dw_hdmi->hdmitx_ctrl)) {
763 dev_err(dev, "Failed to get hdmitx reset\n");
764 return PTR_ERR(meson_dw_hdmi->hdmitx_ctrl);
765 }
766
767 meson_dw_hdmi->hdmitx_phy = devm_reset_control_get_exclusive(dev,
768 "hdmitx_phy");
769 if (IS_ERR(meson_dw_hdmi->hdmitx_phy)) {
770 dev_err(dev, "Failed to get hdmitx_phy reset\n");
771 return PTR_ERR(meson_dw_hdmi->hdmitx_phy);
772 }
773
774 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
775 meson_dw_hdmi->hdmitx = devm_ioremap_resource(dev, res);
776 if (IS_ERR(meson_dw_hdmi->hdmitx))
777 return PTR_ERR(meson_dw_hdmi->hdmitx);
778
779 meson_dw_hdmi->hdmi_pclk = devm_clk_get(dev, "isfr");
780 if (IS_ERR(meson_dw_hdmi->hdmi_pclk)) {
781 dev_err(dev, "Unable to get HDMI pclk\n");
782 return PTR_ERR(meson_dw_hdmi->hdmi_pclk);
783 }
784 clk_prepare_enable(meson_dw_hdmi->hdmi_pclk);
785
786 meson_dw_hdmi->venci_clk = devm_clk_get(dev, "venci");
787 if (IS_ERR(meson_dw_hdmi->venci_clk)) {
788 dev_err(dev, "Unable to get venci clk\n");
789 return PTR_ERR(meson_dw_hdmi->venci_clk);
790 }
791 clk_prepare_enable(meson_dw_hdmi->venci_clk);
792
793 dw_plat_data->regm = devm_regmap_init(dev, NULL, meson_dw_hdmi,
794 &meson_dw_hdmi_regmap_config);
795 if (IS_ERR(dw_plat_data->regm))
796 return PTR_ERR(dw_plat_data->regm);
797
798 irq = platform_get_irq(pdev, 0);
799 if (irq < 0) {
800 dev_err(dev, "Failed to get hdmi top irq\n");
801 return irq;
802 }
803
804 ret = devm_request_threaded_irq(dev, irq, dw_hdmi_top_irq,
805 dw_hdmi_top_thread_irq, IRQF_SHARED,
806 "dw_hdmi_top_irq", meson_dw_hdmi);
807 if (ret) {
808 dev_err(dev, "Failed to request hdmi top irq\n");
809 return ret;
810 }
811
812 /* Encoder */
813
814 drm_encoder_helper_add(encoder, &meson_venc_hdmi_encoder_helper_funcs);
815
816 ret = drm_encoder_init(drm, encoder, &meson_venc_hdmi_encoder_funcs,
817 DRM_MODE_ENCODER_TMDS, "meson_hdmi");
818 if (ret) {
819 dev_err(priv->dev, "Failed to init HDMI encoder\n");
820 return ret;
821 }
822
823 encoder->possible_crtcs = BIT(0);
824
825 DRM_DEBUG_DRIVER("encoder initialized\n");
826
827 /* Enable clocks */
828 regmap_update_bits(priv->hhi, HHI_HDMI_CLK_CNTL, 0xffff, 0x100);
829
830 /* Bring HDMITX MEM output out of power down */
831 regmap_update_bits(priv->hhi, HHI_MEM_PD_REG0, 0xff << 8, 0);
832
833 /* Reset HDMITX APB & TX & PHY */
834 reset_control_reset(meson_dw_hdmi->hdmitx_apb);
835 reset_control_reset(meson_dw_hdmi->hdmitx_ctrl);
836 reset_control_reset(meson_dw_hdmi->hdmitx_phy);
837
838 /* Enable APB3 fail on error */
839 writel_bits_relaxed(BIT(15), BIT(15),
840 meson_dw_hdmi->hdmitx + HDMITX_TOP_CTRL_REG);
841 writel_bits_relaxed(BIT(15), BIT(15),
842 meson_dw_hdmi->hdmitx + HDMITX_DWC_CTRL_REG);
843
844 /* Bring out of reset */
845 dw_hdmi_top_write(meson_dw_hdmi, HDMITX_TOP_SW_RESET, 0);
846
847 msleep(20);
848
849 dw_hdmi_top_write(meson_dw_hdmi, HDMITX_TOP_CLK_CNTL, 0xff);
850
851 /* Enable HDMI-TX Interrupt */
852 dw_hdmi_top_write(meson_dw_hdmi, HDMITX_TOP_INTR_STAT_CLR,
853 HDMITX_TOP_INTR_CORE);
854
855 dw_hdmi_top_write(meson_dw_hdmi, HDMITX_TOP_INTR_MASKN,
856 HDMITX_TOP_INTR_CORE);
857
858 /* Bridge / Connector */
859
860 dw_plat_data->mode_valid = dw_hdmi_mode_valid;
861 dw_plat_data->phy_ops = &meson_dw_hdmi_phy_ops;
862 dw_plat_data->phy_name = "meson_dw_hdmi_phy";
863 dw_plat_data->phy_data = meson_dw_hdmi;
864 dw_plat_data->input_bus_format = MEDIA_BUS_FMT_YUV8_1X24;
865 dw_plat_data->input_bus_encoding = V4L2_YCBCR_ENC_709;
866
867 ret = dw_hdmi_bind(pdev, encoder, &meson_dw_hdmi->dw_plat_data);
868 if (ret)
869 return ret;
870
871 DRM_DEBUG_DRIVER("HDMI controller initialized\n");
872
873 return 0;
874}
875
876static void meson_dw_hdmi_unbind(struct device *dev, struct device *master,
877 void *data)
878{
879 dw_hdmi_unbind(dev);
880}
881
882static const struct component_ops meson_dw_hdmi_ops = {
883 .bind = meson_dw_hdmi_bind,
884 .unbind = meson_dw_hdmi_unbind,
885};
886
887static int meson_dw_hdmi_probe(struct platform_device *pdev)
888{
889 return component_add(&pdev->dev, &meson_dw_hdmi_ops);
890}
891
892static int meson_dw_hdmi_remove(struct platform_device *pdev)
893{
894 component_del(&pdev->dev, &meson_dw_hdmi_ops);
895
896 return 0;
897}
898
899static const struct of_device_id meson_dw_hdmi_of_table[] = {
900 { .compatible = "amlogic,meson-gxbb-dw-hdmi" },
901 { .compatible = "amlogic,meson-gxl-dw-hdmi" },
902 { .compatible = "amlogic,meson-gxm-dw-hdmi" },
903 { }
904};
905MODULE_DEVICE_TABLE(of, meson_dw_hdmi_of_table);
906
907static struct platform_driver meson_dw_hdmi_platform_driver = {
908 .probe = meson_dw_hdmi_probe,
909 .remove = meson_dw_hdmi_remove,
910 .driver = {
911 .name = DRIVER_NAME,
912 .of_match_table = meson_dw_hdmi_of_table,
913 },
914};
915module_platform_driver(meson_dw_hdmi_platform_driver);
916
917MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>");
918MODULE_DESCRIPTION(DRIVER_DESC);
919MODULE_LICENSE("GPL");
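Because the Synopsys core is handed a regmap rather than a plain MMIO pointer, the addr-written-twice protocol stays entirely inside this file: every core register access funnels through meson_dw_hdmi_reg_read/write() and thus dw_hdmi_dwc_read/write(). A minimal sketch of the round trip (example_dwc_read is illustrative, not part of the driver):

#include <linux/regmap.h>

static unsigned int example_dwc_read(struct meson_dw_hdmi *dw_hdmi,
				     unsigned int reg)
{
	unsigned int val = 0;

	/* regmap_read() lands in meson_dw_hdmi_reg_read() above, i.e.
	 * two ADDR writes then two DATA reads under reg_lock. */
	regmap_read(dw_hdmi->dw_plat_data.regm, reg, &val);

	return val;
}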
diff --git a/drivers/gpu/drm/meson/meson_dw_hdmi.h b/drivers/gpu/drm/meson/meson_dw_hdmi.h
new file mode 100644
index 000000000000..0b81183125e3
--- /dev/null
+++ b/drivers/gpu/drm/meson/meson_dw_hdmi.h
@@ -0,0 +1,146 @@
1/*
2 * Copyright (C) 2016 BayLibre, SAS
3 * Author: Neil Armstrong <narmstrong@baylibre.com>
4 * Copyright (C) 2015 Amlogic, Inc. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License as
8 * published by the Free Software Foundation; either version 2 of the
9 * License, or (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20#ifndef __MESON_DW_HDMI_H
21#define __MESON_DW_HDMI_H
22
23/*
24 * Bit 7 RW Reserved. Default 1.
25 * Bit 6 RW Reserved. Default 1.
26 * Bit 5 RW Reserved. Default 1.
27 * Bit 4 RW sw_reset_phyif: PHY interface. 1=Apply reset; 0=Release from reset.
28 * Default 1.
29 * Bit 3 RW sw_reset_intr: interrupt module. 1=Apply reset;
30 * 0=Release from reset.
31 * Default 1.
32 * Bit 2 RW sw_reset_mem: KSV/REVOC mem. 1=Apply reset; 0=Release from reset.
33 * Default 1.
34 * Bit 1 RW sw_reset_rnd: random number interface to HDCP. 1=Apply reset;
35 * 0=Release from reset. Default 1.
36 * Bit 0 RW sw_reset_core: connects to IP's ~irstz. 1=Apply reset;
37 * 0=Release from reset. Default 1.
38 */
39#define HDMITX_TOP_SW_RESET (0x000)
40
41/*
42 * Bit 12 RW i2s_ws_inv:1=Invert i2s_ws; 0=No invert. Default 0.
43 * Bit 11 RW i2s_clk_inv: 1=Invert i2s_clk; 0=No invert. Default 0.
44 * Bit 10 RW spdif_clk_inv: 1=Invert spdif_clk; 0=No invert. Default 0.
45 * Bit 9 RW tmds_clk_inv: 1=Invert tmds_clk; 0=No invert. Default 0.
46 * Bit 8 RW pixel_clk_inv: 1=Invert pixel_clk; 0=No invert. Default 0.
47 * Bit 4 RW cec_clk_en: 1=enable cec_clk; 0=disable. Default 0.
48 * Bit 3 RW i2s_clk_en: 1=enable i2s_clk; 0=disable. Default 0.
49 * Bit 2 RW spdif_clk_en: 1=enable spdif_clk; 0=disable. Default 0.
50 * Bit 1 RW tmds_clk_en: 1=enable tmds_clk; 0=disable. Default 0.
51 * Bit 0 RW pixel_clk_en: 1=enable pixel_clk; 0=disable. Default 0.
52 */
53#define HDMITX_TOP_CLK_CNTL (0x001)
54
55/*
56 * Bit 11: 0 RW hpd_valid_width: filter out width <= M*1024. Default 0.
57 * Bit 15:12 RW hpd_glitch_width: filter out glitch <= N. Default 0.
58 */
59#define HDMITX_TOP_HPD_FILTER (0x002)
60
61/*
62 * intr_maskn: MASK_N, one bit per interrupt source.
63 * 1=Enable interrupt source; 0=Disable interrupt source. Default 0.
64 * [ 4] hdcp22_rndnum_err
65 * [ 3] nonce_rfrsh_rise
66 * [ 2] hpd_fall_intr
67 * [ 1] hpd_rise_intr
68 * [ 0] core_intr
69 */
70#define HDMITX_TOP_INTR_MASKN (0x003)
71
72/*
73 * Bit 30: 0 RW intr_stat: For each bit, write 1 to manually set the interrupt
74 * bit, read back the interrupt status.
75 * Bit 31 R IP interrupt status
76 * Bit 2 RW hpd_fall
77 * Bit 1 RW hpd_rise
78 * Bit 0 RW IP interrupt
79 */
80#define HDMITX_TOP_INTR_STAT (0x004)
81
82/*
83 * [4] hdcp22_rndnum_err
84 * [3] nonce_rfrsh_rise
85 * [2] hpd_fall
86 * [1] hpd_rise
87 * [0] core_intr_rise
88 */
89#define HDMITX_TOP_INTR_STAT_CLR (0x005)
90
91#define HDMITX_TOP_INTR_CORE BIT(0)
92#define HDMITX_TOP_INTR_HPD_RISE BIT(1)
93#define HDMITX_TOP_INTR_HPD_FALL BIT(2)
94
95/* Bit 14:12 RW tmds_sel: 3'b000=Output zero; 3'b001=Output normal TMDS data;
96 * 3'b010=Output PRBS data; 3'b100=Output shift pattern. Default 0.
97 * Bit 11: 9 RW shift_pttn_repeat: 0=New pattern every clk cycle; 1=New pattern
98 * every 2 clk cycles; ...; 7=New pattern every 8 clk cycles. Default 0.
99 * Bit 8 RW shift_pttn_en: 1= Enable shift pattern generator; 0=Disable.
100 * Default 0.
101 * Bit 4: 3 RW prbs_pttn_mode: 0=PRBS11; 1=PRBS15; 2=PRBS7; 3=PRBS31. Default 0.
102 * Bit 2: 1 RW prbs_pttn_width: 0=idle; 1=output 8-bit pattern;
103 * 2=Output 1-bit pattern; 3=output 10-bit pattern. Default 0.
104 * Bit 0 RW prbs_pttn_en: 1=Enable PRBS generator; 0=Disable. Default 0.
105 */
106#define HDMITX_TOP_BIST_CNTL (0x006)
107
108/* Bit 29:20 RW shift_pttn_data[59:50]. Default 0. */
109/* Bit 19:10 RW shift_pttn_data[69:60]. Default 0. */
110/* Bit 9: 0 RW shift_pttn_data[79:70]. Default 0. */
111#define HDMITX_TOP_SHIFT_PTTN_012 (0x007)
112
113/* Bit 29:20 RW shift_pttn_data[29:20]. Default 0. */
114/* Bit 19:10 RW shift_pttn_data[39:30]. Default 0. */
115/* Bit 9: 0 RW shift_pttn_data[49:40]. Default 0. */
116#define HDMITX_TOP_SHIFT_PTTN_345 (0x008)
117
118/* Bit 19:10 RW shift_pttn_data[ 9: 0]. Default 0. */
119/* Bit 9: 0 RW shift_pttn_data[19:10]. Default 0. */
120#define HDMITX_TOP_SHIFT_PTTN_67 (0x009)
121
122/* Bit 25:16 RW tmds_clk_pttn[19:10]. Default 0. */
123/* Bit 9: 0 RW tmds_clk_pttn[ 9: 0]. Default 0. */
124#define HDMITX_TOP_TMDS_CLK_PTTN_01 (0x00A)
125
126/* Bit 25:16 RW tmds_clk_pttn[39:30]. Default 0. */
127/* Bit 9: 0 RW tmds_clk_pttn[29:20]. Default 0. */
128#define HDMITX_TOP_TMDS_CLK_PTTN_23 (0x00B)
129
130/* Bit 1 RW shift_tmds_clk_pttn:1=Enable shifting clk pattern,
131 * used when TMDS CLK rate = TMDS character rate /4. Default 0.
132 * Bit 0 R Reserved. Default 0.
133 * [ 1] shift_tmds_clk_pttn
134 * [ 0] load_tmds_clk_pttn
135 */
136#define HDMITX_TOP_TMDS_CLK_PTTN_CNTL (0x00C)
137
138/* Bit 0 RW revocmem_wr_fail: Read back 1 to indicate Host write REVOC MEM
139 * failure, write 1 to clear the failure flag. Default 0.
140 */
141#define HDMITX_TOP_REVOCMEM_STAT (0x00D)
142
143/* Bit 0 R filtered HPD status. */
144#define HDMITX_TOP_STAT0 (0x00E)
145
146#endif /* __MESON_DW_HDMI_H */
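Reading the HPD filter definition back against the driver: dw_hdmi_setup_hpd() programs HDMITX_TOP_HPD_FILTER with (0xa << 12) | 0xa0. Decoded per the field comments above (a sketch of the bit packing, not a datasheet-verified interpretation):

/* hpd_glitch_width (bits 15:12) = 0xa  -> ignore glitches <= 10 cycles
 * hpd_valid_width  (bits 11:0)  = 0xa0 -> ignore pulses <= 0xa0 * 1024 */
#define EXAMPLE_HPD_FILTER	((0xa << 12) | 0xa0)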
diff --git a/drivers/gpu/drm/meson/meson_registers.h b/drivers/gpu/drm/meson/meson_registers.h
index 6adf9c13fafa..284738196af9 100644
--- a/drivers/gpu/drm/meson/meson_registers.h
+++ b/drivers/gpu/drm/meson/meson_registers.h
@@ -1319,6 +1319,7 @@
1319#define VPU_MISC_CTRL 0x2740
1320#define VPU_ISP_GCLK_CTRL0 0x2741
1321#define VPU_ISP_GCLK_CTRL1 0x2742
1322#define VPU_HDMI_FMT_CTRL 0x2743
1323#define VPU_VDIN_ASYNC_HOLD_CTRL 0x2743
1324#define VPU_VDISP_ASYNC_HOLD_CTRL 0x2744
1325#define VPU_VPUARB2_ASYNC_HOLD_CTRL 0x2745
diff --git a/drivers/gpu/drm/meson/meson_vclk.c b/drivers/gpu/drm/meson/meson_vclk.c
index 252cfd4b19b1..47677047e42d 100644
--- a/drivers/gpu/drm/meson/meson_vclk.c
+++ b/drivers/gpu/drm/meson/meson_vclk.c
@@ -23,13 +23,38 @@
23#include "meson_drv.h"
24#include "meson_vclk.h"
25
26/**
27 * DOC: Video Clocks
28 *
29 * VCLK is the "Pixel Clock" frequency generator from a dedicated PLL.
30 * We handle the following encodings :
31 *
32 * - CVBS 27MHz generator via the VCLK2 to the VENCI and VDAC blocks
33 * - HDMI Pixel Clocks generation
34 *
31 * What is missing :
32 * - HDMI Pixel Clocks generation
35 * What is missing :
36 *
37 * - Generate Pixel clocks for 2K/4K 10bit formats
38 *
39 * Clock generator scheme :
40 *
41 * .. code::
42 *
43 * __________ _________ _____
44 * | | | | | |--ENCI
45 * | HDMI PLL |-| PLL_DIV |--- VCLK--| |--ENCL
46 * |__________| |_________| \ | MUX |--ENCP
47 * --VCLK2-| |--VDAC
48 * |_____|--HDMI-TX
49 *
50 * Final clocks can take input for either VCLK or VCLK2, but
51 * VCLK is the preferred path for HDMI clocking and VCLK2 is the
52 * preferred path for CVBS VDAC clocking.
53 *
54 * VCLK and VCLK2 have fixed divider clock paths for /1, /2, /4, /6 or /12.
55 *
56 * The PLL_DIV can achieve an additional fractional division ratio
57 * such as 1.5, 3.5, 3.75... to generate special 2K and 4K 10bit clocks.
58 */
59
60/* HHI Registers */
@@ -50,11 +75,34 @@
75#define VCLK2_SOFT_RESET BIT(15)
76#define VCLK2_DIV1_EN BIT(0)
77#define HHI_VID_CLK_DIV 0x164 /* 0x59 offset in data sheet */
78#define VCLK_DIV_MASK 0xff
79#define VCLK_DIV_EN BIT(16)
80#define VCLK_DIV_RESET BIT(17)
81#define CTS_ENCP_SEL_MASK (0xf << 24)
82#define CTS_ENCP_SEL_SHIFT 24
83#define CTS_ENCI_SEL_MASK (0xf << 28)
84#define CTS_ENCI_SEL_SHIFT 28
85#define HHI_VID_CLK_CNTL 0x17c /* 0x5f offset in data sheet */
86#define VCLK_EN BIT(19)
87#define VCLK_SEL_MASK (0x7 << 16)
88#define VCLK_SEL_SHIFT 16
89#define VCLK_SOFT_RESET BIT(15)
90#define VCLK_DIV1_EN BIT(0)
91#define VCLK_DIV2_EN BIT(1)
92#define VCLK_DIV4_EN BIT(2)
93#define VCLK_DIV6_EN BIT(3)
94#define VCLK_DIV12_EN BIT(4)
95#define HHI_VID_CLK_CNTL2 0x194 /* 0x65 offset in data sheet */
96#define CTS_ENCI_EN BIT(0)
97#define CTS_ENCP_EN BIT(2)
98#define CTS_VDAC_EN BIT(4)
99#define HDMI_TX_PIXEL_EN BIT(5)
100#define HHI_HDMI_CLK_CNTL 0x1cc /* 0x73 offset in data sheet */
101#define HDMI_TX_PIXEL_SEL_MASK (0xf << 16)
102#define HDMI_TX_PIXEL_SEL_SHIFT 16
103#define CTS_HDMI_SYS_SEL_MASK (0x7 << 9)
104#define CTS_HDMI_SYS_DIV_MASK (0x7f)
105#define CTS_HDMI_SYS_EN BIT(8)
58 106
59#define HHI_VDAC_CNTL0 0x2F4 /* 0xbd offset in data sheet */ 107#define HHI_VDAC_CNTL0 0x2F4 /* 0xbd offset in data sheet */
60#define HHI_VDAC_CNTL1 0x2F8 /* 0xbe offset in data sheet */ 108#define HHI_VDAC_CNTL1 0x2F8 /* 0xbe offset in data sheet */
@@ -69,6 +117,126 @@
69#define HDMI_PLL_RESET BIT(28) 117#define HDMI_PLL_RESET BIT(28)
70#define HDMI_PLL_LOCK BIT(31) 118#define HDMI_PLL_LOCK BIT(31)
71 119
120/* VID PLL Dividers */
121enum {
122 VID_PLL_DIV_1 = 0,
123 VID_PLL_DIV_2,
124 VID_PLL_DIV_2p5,
125 VID_PLL_DIV_3,
126 VID_PLL_DIV_3p5,
127 VID_PLL_DIV_3p75,
128 VID_PLL_DIV_4,
129 VID_PLL_DIV_5,
130 VID_PLL_DIV_6,
131 VID_PLL_DIV_6p25,
132 VID_PLL_DIV_7,
133 VID_PLL_DIV_7p5,
134 VID_PLL_DIV_12,
135 VID_PLL_DIV_14,
136 VID_PLL_DIV_15,
137};
138
139void meson_vid_pll_set(struct meson_drm *priv, unsigned int div)
140{
141 unsigned int shift_val = 0;
142 unsigned int shift_sel = 0;
143
144 /* Disable vid_pll output clock */
145 regmap_update_bits(priv->hhi, HHI_VID_PLL_CLK_DIV, VID_PLL_EN, 0);
146 regmap_update_bits(priv->hhi, HHI_VID_PLL_CLK_DIV, VID_PLL_PRESET, 0);
147
148 switch (div) {
149 case VID_PLL_DIV_2:
150 shift_val = 0x0aaa;
151 shift_sel = 0;
152 break;
153 case VID_PLL_DIV_2p5:
154 shift_val = 0x5294;
155 shift_sel = 2;
156 break;
157 case VID_PLL_DIV_3:
158 shift_val = 0x0db6;
159 shift_sel = 0;
160 break;
161 case VID_PLL_DIV_3p5:
162 shift_val = 0x36cc;
163 shift_sel = 1;
164 break;
165 case VID_PLL_DIV_3p75:
166 shift_val = 0x6666;
167 shift_sel = 2;
168 break;
169 case VID_PLL_DIV_4:
170 shift_val = 0x0ccc;
171 shift_sel = 0;
172 break;
173 case VID_PLL_DIV_5:
174 shift_val = 0x739c;
175 shift_sel = 2;
176 break;
177 case VID_PLL_DIV_6:
178 shift_val = 0x0e38;
179 shift_sel = 0;
180 break;
181 case VID_PLL_DIV_6p25:
182 shift_val = 0x0000;
183 shift_sel = 3;
184 break;
185 case VID_PLL_DIV_7:
186 shift_val = 0x3c78;
187 shift_sel = 1;
188 break;
189 case VID_PLL_DIV_7p5:
190 shift_val = 0x78f0;
191 shift_sel = 2;
192 break;
193 case VID_PLL_DIV_12:
194 shift_val = 0x0fc0;
195 shift_sel = 0;
196 break;
197 case VID_PLL_DIV_14:
198 shift_val = 0x3f80;
199 shift_sel = 1;
200 break;
201 case VID_PLL_DIV_15:
202 shift_val = 0x7f80;
203 shift_sel = 2;
204 break;
205 }
206
207 if (div == VID_PLL_DIV_1)
208 /* Enable vid_pll bypass to HDMI pll */
209 regmap_update_bits(priv->hhi, HHI_VID_PLL_CLK_DIV,
210 VID_PLL_BYPASS, VID_PLL_BYPASS);
211 else {
212 /* Disable Bypass */
213 regmap_update_bits(priv->hhi, HHI_VID_PLL_CLK_DIV,
214 VID_PLL_BYPASS, 0);
215 /* Clear sel */
216 regmap_update_bits(priv->hhi, HHI_VID_PLL_CLK_DIV,
217 3 << 16, 0);
218 regmap_update_bits(priv->hhi, HHI_VID_PLL_CLK_DIV,
219 VID_PLL_PRESET, 0);
220 regmap_update_bits(priv->hhi, HHI_VID_PLL_CLK_DIV,
221 0x7fff, 0);
222
223 /* Setup sel and val */
224 regmap_update_bits(priv->hhi, HHI_VID_PLL_CLK_DIV,
225 3 << 16, shift_sel << 16);
226 regmap_update_bits(priv->hhi, HHI_VID_PLL_CLK_DIV,
227 VID_PLL_PRESET, VID_PLL_PRESET);
228 regmap_update_bits(priv->hhi, HHI_VID_PLL_CLK_DIV,
229 0x7fff, shift_val);
230
231 regmap_update_bits(priv->hhi, HHI_VID_PLL_CLK_DIV,
232 VID_PLL_PRESET, 0);
233 }
234
235 /* Enable the vid_pll output clock */
236 regmap_update_bits(priv->hhi, HHI_VID_PLL_CLK_DIV,
237 VID_PLL_EN, VID_PLL_EN);
238}
239
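A minimal usage sketch (both calls appear further down in this file); note that VID_PLL_DIV_1 takes the bypass path rather than a shift pattern, as the function body above shows:

	/* CVBS: pass the HDMI PLL output through untouched */
	meson_vid_pll_set(priv, VID_PLL_DIV_1);

	/* HDMI: divide by 5 ahead of the VCLK muxes */
	meson_vid_pll_set(priv, VID_PLL_DIV_5);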
72/* 240/*
73 * Setup VCLK2 for 27MHz, and enable clocks for ENCI and VDAC 241 * Setup VCLK2 for 27MHz, and enable clocks for ENCI and VDAC
74 * 242 *
@@ -110,15 +278,8 @@ static void meson_venci_cvbs_clock_config(struct meson_drm *priv)
110 /* Disable VCLK2 */ 278 /* Disable VCLK2 */
111 regmap_update_bits(priv->hhi, HHI_VIID_CLK_CNTL, VCLK2_EN, 0); 279 regmap_update_bits(priv->hhi, HHI_VIID_CLK_CNTL, VCLK2_EN, 0);
112 280
113 /* Disable vid_pll output clock */ 281 /* Setup vid_pll to /1 */
114 regmap_update_bits(priv->hhi, HHI_VID_PLL_CLK_DIV, VID_PLL_EN, 0); 282 meson_vid_pll_set(priv, VID_PLL_DIV_1);
115 regmap_update_bits(priv->hhi, HHI_VID_PLL_CLK_DIV, VID_PLL_PRESET, 0);
116 /* Enable vid_pll bypass to HDMI pll */
117 regmap_update_bits(priv->hhi, HHI_VID_PLL_CLK_DIV,
118 VID_PLL_BYPASS, VID_PLL_BYPASS);
119 /* Enable the vid_pll output clock */
120 regmap_update_bits(priv->hhi, HHI_VID_PLL_CLK_DIV,
121 VID_PLL_EN, VID_PLL_EN);
122 283
123 /* Setup the VCLK2 divider value to achieve 27MHz */ 284 /* Setup the VCLK2 divider value to achieve 27MHz */
124 regmap_update_bits(priv->hhi, HHI_VIID_CLK_DIV, 285 regmap_update_bits(priv->hhi, HHI_VIID_CLK_DIV,
@@ -159,9 +320,454 @@ static void meson_venci_cvbs_clock_config(struct meson_drm *priv)
159 CTS_VDAC_EN, CTS_VDAC_EN); 320 CTS_VDAC_EN, CTS_VDAC_EN);
160} 321}
161 322
323
324/* PLL O1 O2 O3 VP DV EN TX */
325/* 4320 /4 /4 /1 /5 /1 => /2 /2 */
326#define MESON_VCLK_HDMI_ENCI_54000 1
327/* 4320 /4 /4 /1 /5 /1 => /1 /2 */
328#define MESON_VCLK_HDMI_DDR_54000 2
329/* 2970 /4 /1 /1 /5 /1 => /1 /2 */
330#define MESON_VCLK_HDMI_DDR_148500 3
331/* 2970 /2 /2 /2 /5 /1 => /1 /1 */
332#define MESON_VCLK_HDMI_74250 4
333/* 2970 /1 /2 /2 /5 /1 => /1 /1 */
334#define MESON_VCLK_HDMI_148500 5
335/* 2970 /1 /1 /1 /5 /2 => /1 /1 */
336#define MESON_VCLK_HDMI_297000 6
337/* 5940 /1 /1 /2 /5 /1 => /1 /1 */
338#define MESON_VCLK_HDMI_594000 7
339
340struct meson_vclk_params {
341 unsigned int pll_base_freq;
342 unsigned int pll_od1;
343 unsigned int pll_od2;
344 unsigned int pll_od3;
345 unsigned int vid_pll_div;
346 unsigned int vclk_div;
347} params[] = {
348 [MESON_VCLK_HDMI_ENCI_54000] = {
349 .pll_base_freq = 4320000,
350 .pll_od1 = 4,
351 .pll_od2 = 4,
352 .pll_od3 = 1,
353 .vid_pll_div = VID_PLL_DIV_5,
354 .vclk_div = 1,
355 },
356 [MESON_VCLK_HDMI_DDR_54000] = {
357 .pll_base_freq = 4320000,
358 .pll_od1 = 4,
359 .pll_od2 = 4,
360 .pll_od3 = 1,
361 .vid_pll_div = VID_PLL_DIV_5,
362 .vclk_div = 1,
363 },
364 [MESON_VCLK_HDMI_DDR_148500] = {
365 .pll_base_freq = 2970000,
366 .pll_od1 = 4,
367 .pll_od2 = 1,
368 .pll_od3 = 1,
369 .vid_pll_div = VID_PLL_DIV_5,
370 .vclk_div = 1,
371 },
372 [MESON_VCLK_HDMI_74250] = {
373 .pll_base_freq = 2970000,
374 .pll_od1 = 2,
375 .pll_od2 = 2,
376 .pll_od3 = 2,
377 .vid_pll_div = VID_PLL_DIV_5,
378 .vclk_div = 1,
379 },
380 [MESON_VCLK_HDMI_148500] = {
381 .pll_base_freq = 2970000,
382 .pll_od1 = 1,
383 .pll_od2 = 2,
384 .pll_od3 = 2,
385 .vid_pll_div = VID_PLL_DIV_5,
386 .vclk_div = 1,
387 },
388 [MESON_VCLK_HDMI_297000] = {
389 .pll_base_freq = 2970000,
390 .pll_od1 = 1,
391 .pll_od2 = 1,
392 .pll_od3 = 1,
393 .vid_pll_div = VID_PLL_DIV_5,
394 .vclk_div = 2,
395 },
396 [MESON_VCLK_HDMI_594000] = {
397 .pll_base_freq = 5940000,
398 .pll_od1 = 1,
399 .pll_od2 = 1,
400 .pll_od3 = 2,
401 .vid_pll_div = VID_PLL_DIV_5,
402 .vclk_div = 1,
403 },
404};
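Reading the column header above the defines: PLL is the base PLL frequency in MHz, O1/O2/O3 are the PLL output dividers (pll_od1..3), VP the vid_pll divider, DV the VCLK divider, and EN/TX the ENCI-or-ENCP and HDMI-TX divisions applied later in meson_vclk_setup(). As a check, the 54 MHz ENCI entry works out to: 4320 MHz / (4 * 4 * 1) = 270 MHz, / 5 = 54 MHz on VCLK, then / 2 on both the ENCI and HDMI-TX sources for the 27 MHz 480i/576i pixel rate.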
405
406static inline unsigned int pll_od_to_reg(unsigned int od)
407{
408 switch (od) {
409 case 1:
410 return 0;
411 case 2:
412 return 1;
413 case 4:
414 return 2;
415 case 8:
416 return 3;
417 }
418
419 /* Invalid */
420 return 0;
421}
422
423void meson_hdmi_pll_set(struct meson_drm *priv,
424 unsigned int base,
425 unsigned int od1,
426 unsigned int od2,
427 unsigned int od3)
428{
429 unsigned int val;
430
431 if (meson_vpu_is_compatible(priv, "amlogic,meson-gxbb-vpu")) {
432 switch (base) {
433 case 2970000:
434 regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x5800023d);
435 regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x00000000);
436 regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x0d5c5091);
437 regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x801da72c);
438 regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x71486980);
439 regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x00000e55);
440
441 /* Enable and unreset */
442 regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL,
443 0x7 << 28, 0x4 << 28);
444
445 /* Poll for lock bit */
446 regmap_read_poll_timeout(priv->hhi, HHI_HDMI_PLL_CNTL,
447 val, (val & HDMI_PLL_LOCK), 10, 0);
448
449 /* div_frac */
450 regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL2,
451 0xFFFF, 0x4e00);
452 break;
453
454 case 4320000:
455 regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x5800025a);
456 regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x00000000);
457 regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x0d5c5091);
458 regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x801da72c);
459 regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x71486980);
460 regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x00000e55);
461
462 /* unreset */
463 regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL,
464 BIT(28), 0);
465
466 /* Poll for lock bit */
467 regmap_read_poll_timeout(priv->hhi, HHI_HDMI_PLL_CNTL,
468 val, (val & HDMI_PLL_LOCK), 10, 0);
469 break;
470
471 case 5940000:
472 regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x5800027b);
473 regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL2,
474 0xFFFF, 0x4c00);
475 regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x135c5091);
476 regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x801da72c);
477 regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x71486980);
478 regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x00000e55);
479
480 /* unreset */
481 regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL,
482 BIT(28), 0);
483
484 /* Poll for lock bit */
485 regmap_read_poll_timeout(priv->hhi, HHI_HDMI_PLL_CNTL,
486 val, (val & HDMI_PLL_LOCK), 10, 0);
487 break;
488 };
489 } else if (meson_vpu_is_compatible(priv, "amlogic,meson-gxm-vpu") ||
490 meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu")) {
491 switch (base) {
492 case 2970000:
493 regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x4000027b);
494 regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x800cb300);
495 regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x860f30c4);
496 regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x0c8e0000);
497 regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x001fa729);
498 regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x01a31500);
499 break;
500
501 case 4320000:
502 regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x400002b4);
503 regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x800cb000);
504 regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x860f30c4);
505 regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x0c8e0000);
506 regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x001fa729);
507 regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x01a31500);
508 break;
509
510 case 5940000:
511 regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x400002f7);
512 regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x800cb200);
513 regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x860f30c4);
514 regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x0c8e0000);
515 regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x001fa729);
516 regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x01a31500);
517 break;
518
519 };
520
521 /* Reset PLL */
522 regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL,
523 HDMI_PLL_RESET, HDMI_PLL_RESET);
524 regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL,
525 HDMI_PLL_RESET, 0);
526
527 /* Poll for lock bit */
528 regmap_read_poll_timeout(priv->hhi, HHI_HDMI_PLL_CNTL, val,
529 (val & HDMI_PLL_LOCK), 10, 0);
530 };
531
532 if (meson_vpu_is_compatible(priv, "amlogic,meson-gxbb-vpu"))
533 regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL2,
534 3 << 16, pll_od_to_reg(od1) << 16);
535 else if (meson_vpu_is_compatible(priv, "amlogic,meson-gxm-vpu") ||
536 meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu"))
537 regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL3,
538 3 << 21, pll_od_to_reg(od1) << 21);
539
540 if (meson_vpu_is_compatible(priv, "amlogic,meson-gxbb-vpu"))
541 regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL2,
542 3 << 22, pll_od_to_reg(od2) << 22);
543 else if (meson_vpu_is_compatible(priv, "amlogic,meson-gxm-vpu") ||
544 meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu"))
545 regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL3,
546 3 << 23, pll_od_to_reg(od2) << 23);
547
548 if (meson_vpu_is_compatible(priv, "amlogic,meson-gxbb-vpu"))
549 regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL2,
550 3 << 18, pll_od_to_reg(od3) << 18);
551 else if (meson_vpu_is_compatible(priv, "amlogic,meson-gxm-vpu") ||
552 meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu"))
553 regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL3,
554 3 << 19, pll_od_to_reg(od3) << 19);
555}
556
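meson_vclk_setup() below feeds this function directly from the params[] table; for instance, the call it ends up making for the 594 MHz entry (base frequency in kHz, then od1..od3) is:

	meson_hdmi_pll_set(priv, 5940000, 1, 1, 2);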
162void meson_vclk_setup(struct meson_drm *priv, unsigned int target, 557void meson_vclk_setup(struct meson_drm *priv, unsigned int target,
163 unsigned int freq) 558 unsigned int vclk_freq, unsigned int venc_freq,
559 unsigned int dac_freq, bool hdmi_use_enci)
164{ 560{
165 if (target == MESON_VCLK_TARGET_CVBS && freq == MESON_VCLK_CVBS) 561 unsigned int freq;
562 unsigned int hdmi_tx_div;
563 unsigned int venc_div;
564
565 if (target == MESON_VCLK_TARGET_CVBS) {
166 meson_venci_cvbs_clock_config(priv); 566 meson_venci_cvbs_clock_config(priv);
567 return;
568 }
569
570 hdmi_tx_div = vclk_freq / dac_freq;
571
572 if (hdmi_tx_div == 0) {
573 pr_err("Fatal Error, invalid HDMI-TX freq %d\n",
574 dac_freq);
575 return;
576 }
577
578 venc_div = vclk_freq / venc_freq;
579
580 if (venc_div == 0) {
581 pr_err("Fatal Error, invalid HDMI venc freq %d\n",
582 venc_freq);
583 return;
584 }
585
586 switch (vclk_freq) {
587 case 54000:
588 if (hdmi_use_enci)
589 freq = MESON_VCLK_HDMI_ENCI_54000;
590 else
591 freq = MESON_VCLK_HDMI_DDR_54000;
592 break;
593 case 74250:
594 freq = MESON_VCLK_HDMI_74250;
595 break;
596 case 148500:
597 if (dac_freq != 148500)
598 freq = MESON_VCLK_HDMI_DDR_148500;
599 else
600 freq = MESON_VCLK_HDMI_148500;
601 break;
602 case 297000:
603 freq = MESON_VCLK_HDMI_297000;
604 break;
605 case 594000:
606 freq = MESON_VCLK_HDMI_594000;
607 break;
608 default:
609 pr_err("Fatal Error, invalid HDMI vclk freq %d\n",
610 vclk_freq);
611 return;
612 }
613
614 /* Set HDMI-TX sys clock */
615 regmap_update_bits(priv->hhi, HHI_HDMI_CLK_CNTL,
616 CTS_HDMI_SYS_SEL_MASK, 0);
617 regmap_update_bits(priv->hhi, HHI_HDMI_CLK_CNTL,
618 CTS_HDMI_SYS_DIV_MASK, 0);
619 regmap_update_bits(priv->hhi, HHI_HDMI_CLK_CNTL,
620 CTS_HDMI_SYS_EN, CTS_HDMI_SYS_EN);
621
622 /* Set HDMI PLL rate */
623 meson_hdmi_pll_set(priv, params[freq].pll_base_freq,
624 params[freq].pll_od1,
625 params[freq].pll_od2,
626 params[freq].pll_od3);
627
628 /* Setup vid_pll divider */
629 meson_vid_pll_set(priv, params[freq].vid_pll_div);
630
631 /* Set VCLK div */
632 regmap_update_bits(priv->hhi, HHI_VID_CLK_CNTL,
633 VCLK_SEL_MASK, 0);
634 regmap_update_bits(priv->hhi, HHI_VID_CLK_DIV,
635 VCLK_DIV_MASK, params[freq].vclk_div - 1);
636
637 /* Set HDMI-TX source */
638 switch (hdmi_tx_div) {
639 case 1:
640 /* enable vclk_div1 gate */
641 regmap_update_bits(priv->hhi, HHI_VID_CLK_CNTL,
642 VCLK_DIV1_EN, VCLK_DIV1_EN);
643
644 /* select vclk_div1 for HDMI-TX */
645 regmap_update_bits(priv->hhi, HHI_HDMI_CLK_CNTL,
646 HDMI_TX_PIXEL_SEL_MASK, 0);
647 break;
648 case 2:
649 /* enable vclk_div2 gate */
650 regmap_update_bits(priv->hhi, HHI_VID_CLK_CNTL,
651 VCLK_DIV2_EN, VCLK_DIV2_EN);
652
653 /* select vclk_div2 for HDMI-TX */
654 regmap_update_bits(priv->hhi, HHI_HDMI_CLK_CNTL,
655 HDMI_TX_PIXEL_SEL_MASK, 1 << HDMI_TX_PIXEL_SEL_SHIFT);
656 break;
657 case 4:
658 /* enable vclk_div4 gate */
659 regmap_update_bits(priv->hhi, HHI_VID_CLK_CNTL,
660 VCLK_DIV4_EN, VCLK_DIV4_EN);
661
662 /* select vclk_div4 for HDMI-TX */
663 regmap_update_bits(priv->hhi, HHI_HDMI_CLK_CNTL,
664 HDMI_TX_PIXEL_SEL_MASK, 2 << HDMI_TX_PIXEL_SEL_SHIFT);
665 break;
666 case 6:
667 /* enable vclk_div6 gate */
668 regmap_update_bits(priv->hhi, HHI_VID_CLK_CNTL,
669 VCLK_DIV6_EN, VCLK_DIV6_EN);
670
671 /* select vclk_div6 for HDMI-TX */
672 regmap_update_bits(priv->hhi, HHI_HDMI_CLK_CNTL,
673 HDMI_TX_PIXEL_SEL_MASK, 3 << HDMI_TX_PIXEL_SEL_SHIFT);
674 break;
675 case 12:
676 /* enable vclk_div12 gate */
677 regmap_update_bits(priv->hhi, HHI_VID_CLK_CNTL,
678 VCLK_DIV12_EN, VCLK_DIV12_EN);
679
680 /* select vclk_div12 for HDMI-TX */
681 regmap_update_bits(priv->hhi, HHI_HDMI_CLK_CNTL,
682 HDMI_TX_PIXEL_SEL_MASK, 4 << HDMI_TX_PIXEL_SEL_SHIFT);
683 break;
684 }
685 regmap_update_bits(priv->hhi, HHI_VID_CLK_CNTL2,
686 HDMI_TX_PIXEL_EN, HDMI_TX_PIXEL_EN);
687
688 /* Set ENCI/ENCP Source */
689 switch (venc_div) {
690 case 1:
691 /* enable vclk_div1 gate */
692 regmap_update_bits(priv->hhi, HHI_VID_CLK_CNTL,
693 VCLK_DIV1_EN, VCLK_DIV1_EN);
694
695 if (hdmi_use_enci)
696 /* select vclk_div1 for enci */
697 regmap_update_bits(priv->hhi, HHI_VID_CLK_DIV,
698 CTS_ENCI_SEL_MASK, 0);
699 else
700 /* select vclk_div1 for encp */
701 regmap_update_bits(priv->hhi, HHI_VID_CLK_DIV,
702 CTS_ENCP_SEL_MASK, 0);
703 break;
704 case 2:
705 /* enable vclk_div2 gate */
706 regmap_update_bits(priv->hhi, HHI_VID_CLK_CNTL,
707 VCLK_DIV2_EN, VCLK_DIV2_EN);
708
709 if (hdmi_use_enci)
710 /* select vclk_div2 for enci */
711 regmap_update_bits(priv->hhi, HHI_VID_CLK_DIV,
712 CTS_ENCI_SEL_MASK, 1 << CTS_ENCI_SEL_SHIFT);
713 else
714 /* select vclk_div2 for encp */
715 regmap_update_bits(priv->hhi, HHI_VID_CLK_DIV,
716 CTS_ENCP_SEL_MASK, 1 << CTS_ENCP_SEL_SHIFT);
717 break;
718 case 4:
719 /* enable vclk_div4 gate */
720 regmap_update_bits(priv->hhi, HHI_VID_CLK_CNTL,
721 VCLK_DIV4_EN, VCLK_DIV4_EN);
722
723 if (hdmi_use_enci)
724 /* select vclk_div4 for enci */
725 regmap_update_bits(priv->hhi, HHI_VID_CLK_DIV,
726 CTS_ENCI_SEL_MASK, 2 << CTS_ENCI_SEL_SHIFT);
727 else
728 /* select vclk_div4 for encp */
729 regmap_update_bits(priv->hhi, HHI_VID_CLK_DIV,
730 CTS_ENCP_SEL_MASK, 2 << CTS_ENCP_SEL_SHIFT);
731 break;
732 case 6:
733 /* enable vclk_div6 gate */
734 regmap_update_bits(priv->hhi, HHI_VID_CLK_CNTL,
735 VCLK_DIV6_EN, VCLK_DIV6_EN);
736
737 if (hdmi_use_enci)
738 /* select vclk_div6 for enci */
739 regmap_update_bits(priv->hhi, HHI_VID_CLK_DIV,
740 CTS_ENCI_SEL_MASK, 3 << CTS_ENCI_SEL_SHIFT);
741 else
742 /* select vclk_div6 for encp */
743 regmap_update_bits(priv->hhi, HHI_VID_CLK_DIV,
744 CTS_ENCP_SEL_MASK, 3 << CTS_ENCP_SEL_SHIFT);
745 break;
746 case 12:
747 /* enable vclk_div12 gate */
748 regmap_update_bits(priv->hhi, HHI_VID_CLK_CNTL,
749 VCLK_DIV12_EN, VCLK_DIV12_EN);
750
751 if (hdmi_use_enci)
752 /* select vclk_div12 for enci */
753 regmap_update_bits(priv->hhi, HHI_VID_CLK_DIV,
754 CTS_ENCI_SEL_MASK, 4 << CTS_ENCI_SEL_SHIFT);
755 else
756 /* select vclk_div12 for encp */
757 regmap_update_bits(priv->hhi, HHI_VID_CLK_DIV,
758 CTS_ENCP_SEL_MASK, 4 << CTS_ENCP_SEL_SHIFT);
759 break;
760 }
761
762 if (hdmi_use_enci)
763 /* Enable ENCI clock gate */
764 regmap_update_bits(priv->hhi, HHI_VID_CLK_CNTL2,
765 CTS_ENCI_EN, CTS_ENCI_EN);
766 else
767 /* Enable ENCP clock gate */
768 regmap_update_bits(priv->hhi, HHI_VID_CLK_CNTL2,
769 CTS_ENCP_EN, CTS_ENCP_EN);
770
771 regmap_update_bits(priv->hhi, HHI_VID_CLK_CNTL, VCLK_EN, VCLK_EN);
167} 772}
773EXPORT_SYMBOL_GPL(meson_vclk_setup);
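A sketch of a caller, assuming 1080p60 over the ENCP path with no pixel repetition, so VCLK, VENC and HDMI-TX all run at the same rate (frequencies in kHz):

	meson_vclk_setup(priv, MESON_VCLK_TARGET_HDMI,
			 148500,	/* vclk_freq */
			 148500,	/* venc_freq */
			 148500,	/* dac_freq */
			 false);	/* hdmi_use_enci */

This resolves to MESON_VCLK_HDMI_148500 and /1 dividers for both the HDMI-TX and ENCP clock sources.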
diff --git a/drivers/gpu/drm/meson/meson_vclk.h b/drivers/gpu/drm/meson/meson_vclk.h
index ec62735996de..0401b5213471 100644
--- a/drivers/gpu/drm/meson/meson_vclk.h
+++ b/drivers/gpu/drm/meson/meson_vclk.h
@@ -23,12 +23,14 @@
23 23
24enum { 24enum {
25 MESON_VCLK_TARGET_CVBS = 0, 25 MESON_VCLK_TARGET_CVBS = 0,
26 MESON_VCLK_TARGET_HDMI = 1,
26}; 27};
27 28
28/* 27MHz is the CVBS Pixel Clock */ 29/* 27MHz is the CVBS Pixel Clock */
29#define MESON_VCLK_CVBS 27000 30#define MESON_VCLK_CVBS 27000
30 31
31void meson_vclk_setup(struct meson_drm *priv, unsigned int target, 32void meson_vclk_setup(struct meson_drm *priv, unsigned int target,
32 unsigned int freq); 33 unsigned int vclk_freq, unsigned int venc_freq,
34 unsigned int dac_freq, bool hdmi_use_enci);
33 35
34#endif /* __MESON_VCLK_H */ 36#endif /* __MESON_VCLK_H */
diff --git a/drivers/gpu/drm/meson/meson_venc.c b/drivers/gpu/drm/meson/meson_venc.c
index f7c870172220..9509017dbded 100644
--- a/drivers/gpu/drm/meson/meson_venc.c
+++ b/drivers/gpu/drm/meson/meson_venc.c
@@ -26,16 +26,48 @@
26#include "meson_vclk.h" 26#include "meson_vclk.h"
27#include "meson_registers.h" 27#include "meson_registers.h"
28 28
29/* 29/**
30 * DOC: Video Encoder
31 *
 30 * VENC handles the pixel encoding to the output formats. 32 * VENC handles the pixel encoding to the output formats.
 31 * We handle the following encodings: 33 * We handle the following encodings:
32 * - CVBS Encoding via the ENCI encoder and VDAC digital to analog converter
33 * 34 *
 34 * What is missing: 35 * - CVBS Encoding via the ENCI encoder and VDAC digital to analog converter
35 * - TMDS/HDMI Encoding via ENCI_DIV and ENCP 36 * - TMDS/HDMI Encoding via ENCI_DIV and ENCP
36 * - Setup of more clock rates for HDMI modes 37 * - Setup of more clock rates for HDMI modes
38 *
 39 * What is missing:
40 *
37 * - LCD Panel encoding via ENCL 41 * - LCD Panel encoding via ENCL
38 * - TV Panel encoding via ENCT 42 * - TV Panel encoding via ENCT
43 *
 44 * VENC paths:
45 *
46 * .. code::
47 *
48 * _____ _____ ____________________
49 * vd1---| |-| | | VENC /---------|----VDAC
50 * vd2---| VIU |-| VPP |-|-----ENCI/-ENCI_DVI-|-|
51 * osd1--| |-| | | \ | X--HDMI-TX
52 * osd2--|_____|-|_____| | |\-ENCP--ENCP_DVI-|-|
53 * | | |
54 * | \--ENCL-----------|----LVDS
55 * |____________________|
56 *
 57 * The ENCI is designed for PAL or NTSC encoding and can go through the VDAC
 58 * directly for CVBS encoding or through the ENCI_DVI encoder for HDMI.
 59 * The ENCP is designed for Progressive encoding but can also generate
 60 * 1080i interlaced pixels, and was initially designed to encode pixels for
 61 * the VDAC to output RGB or YUV analog outputs.
 62 * Its output is only used through the ENCP_DVI encoder for HDMI.
 63 * The ENCL LVDS encoder is not implemented.
 64 *
 65 * The ENCI and ENCP encoders need specially defined parameters for each
 66 * supported mode, which thus cannot be derived from standard video timings.
 67 *
 68 * The ENCI and ENCP DVI encoders are more generic and can generate any timings
 69 * from the pixel data generated by ENCI or ENCP, so they can use the standard
 70 * video timings as the source for HW parameters.
39 */ 71 */
40 72
41/* HHI Registers */ 73/* HHI Registers */
@@ -91,6 +123,1219 @@ struct meson_cvbs_enci_mode meson_cvbs_enci_ntsc = {
91 .analog_sync_adj = 0x9c00, 123 .analog_sync_adj = 0x9c00,
92}; 124};
93 125
126union meson_hdmi_venc_mode {
127 struct {
128 unsigned int mode_tag;
129 unsigned int hso_begin;
130 unsigned int hso_end;
131 unsigned int vso_even;
132 unsigned int vso_odd;
133 unsigned int macv_max_amp;
134 unsigned int video_prog_mode;
135 unsigned int video_mode;
136 unsigned int sch_adjust;
137 unsigned int yc_delay;
138 unsigned int pixel_start;
139 unsigned int pixel_end;
140 unsigned int top_field_line_start;
141 unsigned int top_field_line_end;
142 unsigned int bottom_field_line_start;
143 unsigned int bottom_field_line_end;
144 } enci;
145 struct {
146 unsigned int dvi_settings;
147 unsigned int video_mode;
148 unsigned int video_mode_adv;
149 unsigned int video_prog_mode;
150 bool video_prog_mode_present;
151 unsigned int video_sync_mode;
152 bool video_sync_mode_present;
153 unsigned int video_yc_dly;
154 bool video_yc_dly_present;
155 unsigned int video_rgb_ctrl;
156 bool video_rgb_ctrl_present;
157 unsigned int video_filt_ctrl;
158 bool video_filt_ctrl_present;
159 unsigned int video_ofld_voav_ofst;
160 bool video_ofld_voav_ofst_present;
161 unsigned int yfp1_htime;
162 unsigned int yfp2_htime;
163 unsigned int max_pxcnt;
164 unsigned int hspuls_begin;
165 unsigned int hspuls_end;
166 unsigned int hspuls_switch;
167 unsigned int vspuls_begin;
168 unsigned int vspuls_end;
169 unsigned int vspuls_bline;
170 unsigned int vspuls_eline;
171 unsigned int eqpuls_begin;
172 bool eqpuls_begin_present;
173 unsigned int eqpuls_end;
174 bool eqpuls_end_present;
175 unsigned int eqpuls_bline;
176 bool eqpuls_bline_present;
177 unsigned int eqpuls_eline;
178 bool eqpuls_eline_present;
179 unsigned int havon_begin;
180 unsigned int havon_end;
181 unsigned int vavon_bline;
182 unsigned int vavon_eline;
183 unsigned int hso_begin;
184 unsigned int hso_end;
185 unsigned int vso_begin;
186 unsigned int vso_end;
187 unsigned int vso_bline;
188 unsigned int vso_eline;
189 bool vso_eline_present;
190 unsigned int sy_val;
191 bool sy_val_present;
192 unsigned int sy2_val;
193 bool sy2_val_present;
194 unsigned int max_lncnt;
195 } encp;
196};
197
198union meson_hdmi_venc_mode meson_hdmi_enci_mode_480i = {
199 .enci = {
200 .hso_begin = 5,
201 .hso_end = 129,
202 .vso_even = 3,
203 .vso_odd = 260,
204 .macv_max_amp = 0x810b,
205 .video_prog_mode = 0xf0,
206 .video_mode = 0x8,
207 .sch_adjust = 0x20,
208 .yc_delay = 0,
209 .pixel_start = 227,
210 .pixel_end = 1667,
211 .top_field_line_start = 18,
212 .top_field_line_end = 258,
213 .bottom_field_line_start = 19,
214 .bottom_field_line_end = 259,
215 },
216};
217
218union meson_hdmi_venc_mode meson_hdmi_enci_mode_576i = {
219 .enci = {
220 .hso_begin = 3,
221 .hso_end = 129,
222 .vso_even = 3,
223 .vso_odd = 260,
224 .macv_max_amp = 8107,
225 .video_prog_mode = 0xff,
226 .video_mode = 0x13,
227 .sch_adjust = 0x28,
228 .yc_delay = 0x333,
229 .pixel_start = 251,
230 .pixel_end = 1691,
231 .top_field_line_start = 22,
232 .top_field_line_end = 310,
233 .bottom_field_line_start = 23,
234 .bottom_field_line_end = 311,
235 },
236};
237
238union meson_hdmi_venc_mode meson_hdmi_encp_mode_480p = {
239 .encp = {
240 .dvi_settings = 0x21,
241 .video_mode = 0x4000,
242 .video_mode_adv = 0x9,
243 .video_prog_mode = 0,
244 .video_prog_mode_present = true,
245 .video_sync_mode = 7,
246 .video_sync_mode_present = true,
247 /* video_yc_dly */
248 /* video_rgb_ctrl */
249 .video_filt_ctrl = 0x2052,
250 .video_filt_ctrl_present = true,
251 /* video_ofld_voav_ofst */
252 .yfp1_htime = 244,
253 .yfp2_htime = 1630,
254 .max_pxcnt = 1715,
255 .hspuls_begin = 0x22,
256 .hspuls_end = 0xa0,
257 .hspuls_switch = 88,
258 .vspuls_begin = 0,
259 .vspuls_end = 1589,
260 .vspuls_bline = 0,
261 .vspuls_eline = 5,
262 .havon_begin = 249,
263 .havon_end = 1689,
264 .vavon_bline = 42,
265 .vavon_eline = 521,
266 /* eqpuls_begin */
267 /* eqpuls_end */
268 /* eqpuls_bline */
269 /* eqpuls_eline */
270 .hso_begin = 3,
271 .hso_end = 5,
272 .vso_begin = 3,
273 .vso_end = 5,
274 .vso_bline = 0,
275 /* vso_eline */
276 .sy_val = 8,
277 .sy_val_present = true,
278 .sy2_val = 0x1d8,
279 .sy2_val_present = true,
280 .max_lncnt = 524,
281 },
282};
283
284union meson_hdmi_venc_mode meson_hdmi_encp_mode_576p = {
285 .encp = {
286 .dvi_settings = 0x21,
287 .video_mode = 0x4000,
288 .video_mode_adv = 0x9,
289 .video_prog_mode = 0,
290 .video_prog_mode_present = true,
291 .video_sync_mode = 7,
292 .video_sync_mode_present = true,
293 /* video_yc_dly */
294 /* video_rgb_ctrl */
295 .video_filt_ctrl = 0x52,
296 .video_filt_ctrl_present = true,
297 /* video_ofld_voav_ofst */
298 .yfp1_htime = 235,
299 .yfp2_htime = 1674,
300 .max_pxcnt = 1727,
301 .hspuls_begin = 0,
302 .hspuls_end = 0x80,
303 .hspuls_switch = 88,
304 .vspuls_begin = 0,
305 .vspuls_end = 1599,
306 .vspuls_bline = 0,
307 .vspuls_eline = 4,
308 .havon_begin = 235,
309 .havon_end = 1674,
310 .vavon_bline = 44,
311 .vavon_eline = 619,
312 /* eqpuls_begin */
313 /* eqpuls_end */
314 /* eqpuls_bline */
315 /* eqpuls_eline */
316 .hso_begin = 0x80,
317 .hso_end = 0,
318 .vso_begin = 0,
319 .vso_end = 5,
320 .vso_bline = 0,
321 /* vso_eline */
322 .sy_val = 8,
323 .sy_val_present = true,
324 .sy2_val = 0x1d8,
325 .sy2_val_present = true,
326 .max_lncnt = 624,
327 },
328};
329
330union meson_hdmi_venc_mode meson_hdmi_encp_mode_720p60 = {
331 .encp = {
332 .dvi_settings = 0x2029,
333 .video_mode = 0x4040,
334 .video_mode_adv = 0x19,
335 /* video_prog_mode */
336 /* video_sync_mode */
337 /* video_yc_dly */
338 /* video_rgb_ctrl */
339 /* video_filt_ctrl */
340 /* video_ofld_voav_ofst */
341 .yfp1_htime = 648,
342 .yfp2_htime = 3207,
343 .max_pxcnt = 3299,
344 .hspuls_begin = 80,
345 .hspuls_end = 240,
346 .hspuls_switch = 80,
347 .vspuls_begin = 688,
348 .vspuls_end = 3248,
349 .vspuls_bline = 4,
350 .vspuls_eline = 8,
351 .havon_begin = 648,
352 .havon_end = 3207,
353 .vavon_bline = 29,
354 .vavon_eline = 748,
355 /* eqpuls_begin */
356 /* eqpuls_end */
357 /* eqpuls_bline */
358 /* eqpuls_eline */
359 .hso_begin = 256,
360 .hso_end = 168,
361 .vso_begin = 168,
362 .vso_end = 256,
363 .vso_bline = 0,
364 .vso_eline = 5,
365 .vso_eline_present = true,
366 /* sy_val */
367 /* sy2_val */
368 .max_lncnt = 749,
369 },
370};
371
372union meson_hdmi_venc_mode meson_hdmi_encp_mode_720p50 = {
373 .encp = {
374 .dvi_settings = 0x202d,
375 .video_mode = 0x4040,
376 .video_mode_adv = 0x19,
377 .video_prog_mode = 0x100,
378 .video_prog_mode_present = true,
379 .video_sync_mode = 0x407,
380 .video_sync_mode_present = true,
381 .video_yc_dly = 0,
382 .video_yc_dly_present = true,
383 /* video_rgb_ctrl */
384 /* video_filt_ctrl */
385 /* video_ofld_voav_ofst */
386 .yfp1_htime = 648,
387 .yfp2_htime = 3207,
388 .max_pxcnt = 3959,
389 .hspuls_begin = 80,
390 .hspuls_end = 240,
391 .hspuls_switch = 80,
392 .vspuls_begin = 688,
393 .vspuls_end = 3248,
394 .vspuls_bline = 4,
395 .vspuls_eline = 8,
396 .havon_begin = 648,
397 .havon_end = 3207,
398 .vavon_bline = 29,
399 .vavon_eline = 748,
400 /* eqpuls_begin */
401 /* eqpuls_end */
402 /* eqpuls_bline */
403 /* eqpuls_eline */
404 .hso_begin = 128,
405 .hso_end = 208,
406 .vso_begin = 128,
407 .vso_end = 128,
408 .vso_bline = 0,
409 .vso_eline = 5,
410 .vso_eline_present = true,
411 /* sy_val */
412 /* sy2_val */
413 .max_lncnt = 749,
414 },
415};
416
417union meson_hdmi_venc_mode meson_hdmi_encp_mode_1080i60 = {
418 .encp = {
419 .dvi_settings = 0x2029,
420 .video_mode = 0x5ffc,
421 .video_mode_adv = 0x19,
422 .video_prog_mode = 0x100,
423 .video_prog_mode_present = true,
424 .video_sync_mode = 0x207,
425 .video_sync_mode_present = true,
426 /* video_yc_dly */
427 /* video_rgb_ctrl */
428 /* video_filt_ctrl */
429 .video_ofld_voav_ofst = 0x11,
430 .video_ofld_voav_ofst_present = true,
431 .yfp1_htime = 516,
432 .yfp2_htime = 4355,
433 .max_pxcnt = 4399,
434 .hspuls_begin = 88,
435 .hspuls_end = 264,
436 .hspuls_switch = 88,
437 .vspuls_begin = 440,
438 .vspuls_end = 2200,
439 .vspuls_bline = 0,
440 .vspuls_eline = 4,
441 .havon_begin = 516,
442 .havon_end = 4355,
443 .vavon_bline = 20,
444 .vavon_eline = 559,
445 .eqpuls_begin = 2288,
446 .eqpuls_begin_present = true,
447 .eqpuls_end = 2464,
448 .eqpuls_end_present = true,
449 .eqpuls_bline = 0,
450 .eqpuls_bline_present = true,
451 .eqpuls_eline = 4,
452 .eqpuls_eline_present = true,
453 .hso_begin = 264,
454 .hso_end = 176,
455 .vso_begin = 88,
456 .vso_end = 88,
457 .vso_bline = 0,
458 .vso_eline = 5,
459 .vso_eline_present = true,
460 /* sy_val */
461 /* sy2_val */
462 .max_lncnt = 1124,
463 },
464};
465
466union meson_hdmi_venc_mode meson_hdmi_encp_mode_1080i50 = {
467 .encp = {
468 .dvi_settings = 0x202d,
469 .video_mode = 0x5ffc,
470 .video_mode_adv = 0x19,
471 .video_prog_mode = 0x100,
472 .video_prog_mode_present = true,
473 .video_sync_mode = 0x7,
474 .video_sync_mode_present = true,
475 /* video_yc_dly */
476 /* video_rgb_ctrl */
477 /* video_filt_ctrl */
478 .video_ofld_voav_ofst = 0x11,
479 .video_ofld_voav_ofst_present = true,
480 .yfp1_htime = 526,
481 .yfp2_htime = 4365,
482 .max_pxcnt = 5279,
483 .hspuls_begin = 88,
484 .hspuls_end = 264,
485 .hspuls_switch = 88,
486 .vspuls_begin = 440,
487 .vspuls_end = 2200,
488 .vspuls_bline = 0,
489 .vspuls_eline = 4,
490 .havon_begin = 526,
491 .havon_end = 4365,
492 .vavon_bline = 20,
493 .vavon_eline = 559,
494 .eqpuls_begin = 2288,
495 .eqpuls_begin_present = true,
496 .eqpuls_end = 2464,
497 .eqpuls_end_present = true,
498 .eqpuls_bline = 0,
499 .eqpuls_bline_present = true,
500 .eqpuls_eline = 4,
501 .eqpuls_eline_present = true,
502 .hso_begin = 142,
503 .hso_end = 230,
504 .vso_begin = 142,
505 .vso_end = 142,
506 .vso_bline = 0,
507 .vso_eline = 5,
508 .vso_eline_present = true,
509 /* sy_val */
510 /* sy2_val */
511 .max_lncnt = 1124,
512 },
513};
514
515union meson_hdmi_venc_mode meson_hdmi_encp_mode_1080p24 = {
516 .encp = {
517 .dvi_settings = 0xd,
518 .video_mode = 0x4040,
519 .video_mode_adv = 0x18,
520 .video_prog_mode = 0x100,
521 .video_prog_mode_present = true,
522 .video_sync_mode = 0x7,
523 .video_sync_mode_present = true,
524 .video_yc_dly = 0,
525 .video_yc_dly_present = true,
526 .video_rgb_ctrl = 2,
527 .video_rgb_ctrl_present = true,
528 .video_filt_ctrl = 0x1052,
529 .video_filt_ctrl_present = true,
530 /* video_ofld_voav_ofst */
531 .yfp1_htime = 271,
532 .yfp2_htime = 2190,
533 .max_pxcnt = 2749,
534 .hspuls_begin = 44,
535 .hspuls_end = 132,
536 .hspuls_switch = 44,
537 .vspuls_begin = 220,
538 .vspuls_end = 2140,
539 .vspuls_bline = 0,
540 .vspuls_eline = 4,
541 .havon_begin = 271,
542 .havon_end = 2190,
543 .vavon_bline = 41,
544 .vavon_eline = 1120,
545 /* eqpuls_begin */
546 /* eqpuls_end */
547 .eqpuls_bline = 0,
548 .eqpuls_bline_present = true,
549 .eqpuls_eline = 4,
550 .eqpuls_eline_present = true,
551 .hso_begin = 79,
552 .hso_end = 123,
553 .vso_begin = 79,
554 .vso_end = 79,
555 .vso_bline = 0,
556 .vso_eline = 5,
557 .vso_eline_present = true,
558 /* sy_val */
559 /* sy2_val */
560 .max_lncnt = 1124,
561 },
562};
563
564union meson_hdmi_venc_mode meson_hdmi_encp_mode_1080p30 = {
565 .encp = {
566 .dvi_settings = 0x1,
567 .video_mode = 0x4040,
568 .video_mode_adv = 0x18,
569 .video_prog_mode = 0x100,
570 .video_prog_mode_present = true,
571 /* video_sync_mode */
572 /* video_yc_dly */
573 /* video_rgb_ctrl */
574 .video_filt_ctrl = 0x1052,
575 .video_filt_ctrl_present = true,
576 /* video_ofld_voav_ofst */
577 .yfp1_htime = 140,
578 .yfp2_htime = 2060,
579 .max_pxcnt = 2199,
580 .hspuls_begin = 2156,
581 .hspuls_end = 44,
582 .hspuls_switch = 44,
583 .vspuls_begin = 140,
584 .vspuls_end = 2059,
585 .vspuls_bline = 0,
586 .vspuls_eline = 4,
587 .havon_begin = 148,
588 .havon_end = 2067,
589 .vavon_bline = 41,
590 .vavon_eline = 1120,
591 /* eqpuls_begin */
592 /* eqpuls_end */
593 /* eqpuls_bline */
594 /* eqpuls_eline */
595 .hso_begin = 44,
596 .hso_end = 2156,
597 .vso_begin = 2100,
598 .vso_end = 2164,
599 .vso_bline = 0,
600 .vso_eline = 5,
601 .vso_eline_present = true,
602 /* sy_val */
603 /* sy2_val */
604 .max_lncnt = 1124,
605 },
606};
607
608union meson_hdmi_venc_mode meson_hdmi_encp_mode_1080p50 = {
609 .encp = {
610 .dvi_settings = 0xd,
611 .video_mode = 0x4040,
612 .video_mode_adv = 0x18,
613 .video_prog_mode = 0x100,
614 .video_prog_mode_present = true,
615 .video_sync_mode = 0x7,
616 .video_sync_mode_present = true,
617 .video_yc_dly = 0,
618 .video_yc_dly_present = true,
619 .video_rgb_ctrl = 2,
620 .video_rgb_ctrl_present = true,
621 /* video_filt_ctrl */
622 /* video_ofld_voav_ofst */
623 .yfp1_htime = 271,
624 .yfp2_htime = 2190,
625 .max_pxcnt = 2639,
626 .hspuls_begin = 44,
627 .hspuls_end = 132,
628 .hspuls_switch = 44,
629 .vspuls_begin = 220,
630 .vspuls_end = 2140,
631 .vspuls_bline = 0,
632 .vspuls_eline = 4,
633 .havon_begin = 271,
634 .havon_end = 2190,
635 .vavon_bline = 41,
636 .vavon_eline = 1120,
637 /* eqpuls_begin */
638 /* eqpuls_end */
639 .eqpuls_bline = 0,
640 .eqpuls_bline_present = true,
641 .eqpuls_eline = 4,
642 .eqpuls_eline_present = true,
643 .hso_begin = 79,
644 .hso_end = 123,
645 .vso_begin = 79,
646 .vso_end = 79,
647 .vso_bline = 0,
648 .vso_eline = 5,
649 .vso_eline_present = true,
650 /* sy_val */
651 /* sy2_val */
652 .max_lncnt = 1124,
653 },
654};
655
656union meson_hdmi_venc_mode meson_hdmi_encp_mode_1080p60 = {
657 .encp = {
658 .dvi_settings = 0x1,
659 .video_mode = 0x4040,
660 .video_mode_adv = 0x18,
661 .video_prog_mode = 0x100,
662 .video_prog_mode_present = true,
663 /* video_sync_mode */
664 /* video_yc_dly */
665 /* video_rgb_ctrl */
666 .video_filt_ctrl = 0x1052,
667 .video_filt_ctrl_present = true,
668 /* video_ofld_voav_ofst */
669 .yfp1_htime = 140,
670 .yfp2_htime = 2060,
671 .max_pxcnt = 2199,
672 .hspuls_begin = 2156,
673 .hspuls_end = 44,
674 .hspuls_switch = 44,
675 .vspuls_begin = 140,
676 .vspuls_end = 2059,
677 .vspuls_bline = 0,
678 .vspuls_eline = 4,
679 .havon_begin = 148,
680 .havon_end = 2067,
681 .vavon_bline = 41,
682 .vavon_eline = 1120,
683 /* eqpuls_begin */
684 /* eqpuls_end */
685 /* eqpuls_bline */
686 /* eqpuls_eline */
687 .hso_begin = 44,
688 .hso_end = 2156,
689 .vso_begin = 2100,
690 .vso_end = 2164,
691 .vso_bline = 0,
692 .vso_eline = 5,
693 .vso_eline_present = true,
694 /* sy_val */
695 /* sy2_val */
696 .max_lncnt = 1124,
697 },
698};
699
700struct meson_hdmi_venc_vic_mode {
701 unsigned int vic;
702 union meson_hdmi_venc_mode *mode;
703} meson_hdmi_venc_vic_modes[] = {
704 { 6, &meson_hdmi_enci_mode_480i },
705 { 7, &meson_hdmi_enci_mode_480i },
706 { 21, &meson_hdmi_enci_mode_576i },
707 { 22, &meson_hdmi_enci_mode_576i },
708 { 2, &meson_hdmi_encp_mode_480p },
709 { 3, &meson_hdmi_encp_mode_480p },
710 { 17, &meson_hdmi_encp_mode_576p },
711 { 18, &meson_hdmi_encp_mode_576p },
712 { 4, &meson_hdmi_encp_mode_720p60 },
713 { 19, &meson_hdmi_encp_mode_720p50 },
714 { 5, &meson_hdmi_encp_mode_1080i60 },
715 { 20, &meson_hdmi_encp_mode_1080i50 },
716 { 32, &meson_hdmi_encp_mode_1080p24 },
717 { 34, &meson_hdmi_encp_mode_1080p30 },
718 { 31, &meson_hdmi_encp_mode_1080p50 },
719 { 16, &meson_hdmi_encp_mode_1080p60 },
720 { 0, NULL}, /* sentinel */
721};
722
723static signed int to_signed(unsigned int a)
724{
725 if (a <= 7)
726 return a;
727 else
728 return a - 16;
729}
730
731static unsigned long modulo(unsigned long a, unsigned long b)
732{
733 if (a >= b)
734 return a - b;
735 else
736 return a;
737}
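Two notes on the helpers above: to_signed() decodes the 4-bit signed offset field of ENCP_VIDEO_OFLD_VOAV_OFST used below, and modulo() performs a single-wrap reduction rather than a true modulo; modulo(2250, 2200) is 50, but modulo(4600, 2200) would yield 2400. The timing arithmetic below always keeps the first argument under twice the second, so one subtraction is enough.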
738
739bool meson_venc_hdmi_supported_vic(int vic)
740{
741 struct meson_hdmi_venc_vic_mode *vmode = meson_hdmi_venc_vic_modes;
742
743 while (vmode->vic && vmode->mode) {
744 if (vmode->vic == vic)
745 return true;
746 vmode++;
747 }
748
749 return false;
750}
751EXPORT_SYMBOL_GPL(meson_venc_hdmi_supported_vic);
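A hypothetical caller, assuming the DRM core's drm_match_cea_mode() is used to recover the CEA VIC from the drm_display_mode (that lookup is not part of this patch):

	int vic = drm_match_cea_mode(mode);

	if (!meson_venc_hdmi_supported_vic(vic))
		return MODE_BAD;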
752
753static union meson_hdmi_venc_mode *meson_venc_hdmi_get_vic_vmode(int vic)
754{
755 struct meson_hdmi_venc_vic_mode *vmode = meson_hdmi_venc_vic_modes;
756
757 while (vmode->vic && vmode->mode) {
758 if (vmode->vic == vic)
759 return vmode->mode;
760 vmode++;
761 }
762
763 return NULL;
764}
765
766bool meson_venc_hdmi_venc_repeat(int vic)
767{
768 /* Repeat VENC pixels for 480/576i/p, 720p50/60 and 1080p50/60 */
769 if (vic == 6 || vic == 7 || /* 480i */
770 vic == 21 || vic == 22 || /* 576i */
771 vic == 17 || vic == 18 || /* 576p */
772 vic == 2 || vic == 3 || /* 480p */
773 vic == 4 || /* 720p60 */
774 vic == 19 || /* 720p50 */
775 vic == 5 || /* 1080i60 */
776 vic == 20) /* 1080i50 */
777 return true;
778
779 return false;
780}
781EXPORT_SYMBOL_GPL(meson_venc_hdmi_venc_repeat);
782
783void meson_venc_hdmi_mode_set(struct meson_drm *priv, int vic,
784 struct drm_display_mode *mode)
785{
786 union meson_hdmi_venc_mode *vmode = NULL;
787 bool use_enci = false;
788 bool venc_repeat = false;
789 bool hdmi_repeat = false;
790 unsigned int venc_hdmi_latency = 2;
791 unsigned long total_pixels_venc = 0;
792 unsigned long active_pixels_venc = 0;
793 unsigned long front_porch_venc = 0;
794 unsigned long hsync_pixels_venc = 0;
795 unsigned long de_h_begin = 0;
796 unsigned long de_h_end = 0;
797 unsigned long de_v_begin_even = 0;
798 unsigned long de_v_end_even = 0;
799 unsigned long de_v_begin_odd = 0;
800 unsigned long de_v_end_odd = 0;
801 unsigned long hs_begin = 0;
802 unsigned long hs_end = 0;
803 unsigned long vs_adjust = 0;
804 unsigned long vs_bline_evn = 0;
805 unsigned long vs_eline_evn = 0;
806 unsigned long vs_bline_odd = 0;
807 unsigned long vs_eline_odd = 0;
808 unsigned long vso_begin_evn = 0;
809 unsigned long vso_begin_odd = 0;
810 unsigned int eof_lines;
811 unsigned int sof_lines;
812 unsigned int vsync_lines;
813
814 vmode = meson_venc_hdmi_get_vic_vmode(vic);
815 if (!vmode) {
816 dev_err(priv->dev, "%s: Fatal Error, unsupported vic %d\n",
817 __func__, vic);
818 return;
819 }
820
821 /* Use VENCI for 480i and 576i and double HDMI pixels */
822 if (mode->flags & DRM_MODE_FLAG_DBLCLK) {
823 hdmi_repeat = true;
824 use_enci = true;
825 venc_hdmi_latency = 1;
826 }
827
828 /* Repeat VENC pixels for 480/576i/p, 720p50/60 and 1080p50/60 */
829 if (meson_venc_hdmi_venc_repeat(vic))
830 venc_repeat = true;
831
832 eof_lines = mode->vsync_start - mode->vdisplay;
833 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
834 eof_lines /= 2;
835 sof_lines = mode->vtotal - mode->vsync_end;
836 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
837 sof_lines /= 2;
838 vsync_lines = mode->vsync_end - mode->vsync_start;
839 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
840 vsync_lines /= 2;
841
842 total_pixels_venc = mode->htotal;
843 if (hdmi_repeat)
844 total_pixels_venc /= 2;
845 if (venc_repeat)
846 total_pixels_venc *= 2;
847
848 active_pixels_venc = mode->hdisplay;
849 if (hdmi_repeat)
850 active_pixels_venc /= 2;
851 if (venc_repeat)
852 active_pixels_venc *= 2;
853
854 front_porch_venc = (mode->hsync_start - mode->hdisplay);
855 if (hdmi_repeat)
856 front_porch_venc /= 2;
857 if (venc_repeat)
858 front_porch_venc *= 2;
859
860 hsync_pixels_venc = (mode->hsync_end - mode->hsync_start);
861 if (hdmi_repeat)
862 hsync_pixels_venc /= 2;
863 if (venc_repeat)
864 hsync_pixels_venc *= 2;
865
866 /* Disable VDACs */
867 writel_bits_relaxed(0x1f, 0x1f,
868 priv->io_base + _REG(VENC_VDAC_SETTING));
869
870 writel_relaxed(0, priv->io_base + _REG(ENCI_VIDEO_EN));
871 writel_relaxed(0, priv->io_base + _REG(ENCP_VIDEO_EN));
872
873 if (use_enci) {
874 unsigned int lines_f0;
875 unsigned int lines_f1;
876
877 /* CVBS Filter settings */
878 writel_relaxed(0x12, priv->io_base + _REG(ENCI_CFILT_CTRL));
879 writel_relaxed(0x12, priv->io_base + _REG(ENCI_CFILT_CTRL2));
880
 881 /* Digital Video Select: Interlace, clk27 clock, external */
882 writel_relaxed(0, priv->io_base + _REG(VENC_DVI_SETTING));
883
884 /* Reset Video Mode */
885 writel_relaxed(0, priv->io_base + _REG(ENCI_VIDEO_MODE));
886 writel_relaxed(0, priv->io_base + _REG(ENCI_VIDEO_MODE_ADV));
887
888 /* Horizontal sync signal output */
889 writel_relaxed(vmode->enci.hso_begin,
890 priv->io_base + _REG(ENCI_SYNC_HSO_BEGIN));
891 writel_relaxed(vmode->enci.hso_end,
892 priv->io_base + _REG(ENCI_SYNC_HSO_END));
893
894 /* Vertical Sync lines */
895 writel_relaxed(vmode->enci.vso_even,
896 priv->io_base + _REG(ENCI_SYNC_VSO_EVNLN));
897 writel_relaxed(vmode->enci.vso_odd,
898 priv->io_base + _REG(ENCI_SYNC_VSO_ODDLN));
899
900 /* Macrovision max amplitude change */
901 writel_relaxed(vmode->enci.macv_max_amp,
902 priv->io_base + _REG(ENCI_MACV_MAX_AMP));
903
904 /* Video mode */
905 writel_relaxed(vmode->enci.video_prog_mode,
906 priv->io_base + _REG(VENC_VIDEO_PROG_MODE));
907 writel_relaxed(vmode->enci.video_mode,
908 priv->io_base + _REG(ENCI_VIDEO_MODE));
909
 910 /* Advanced Video Mode:
911 * Demux shifting 0x2
912 * Blank line end at line17/22
913 * High bandwidth Luma Filter
914 * Low bandwidth Chroma Filter
915 * Bypass luma low pass filter
916 * No macrovision on CSYNC
917 */
918 writel_relaxed(0x26, priv->io_base + _REG(ENCI_VIDEO_MODE_ADV));
919
920 writel(vmode->enci.sch_adjust,
921 priv->io_base + _REG(ENCI_VIDEO_SCH));
922
 923 /* Sync mode: MASTER mode, free run, send HSO/VSO out */
924 writel_relaxed(0x07, priv->io_base + _REG(ENCI_SYNC_MODE));
925
926 if (vmode->enci.yc_delay)
927 writel_relaxed(vmode->enci.yc_delay,
928 priv->io_base + _REG(ENCI_YC_DELAY));
929
930
 931 /* Un-reset the Interlaced TV Encoder */
932 writel_relaxed(0, priv->io_base + _REG(ENCI_DBG_PX_RST));
933
934 /* Enable Vfifo2vd, Y_Cb_Y_Cr select */
935 writel_relaxed(0x4e01, priv->io_base + _REG(ENCI_VFIFO2VD_CTL));
936
937 /* Timings */
938 writel_relaxed(vmode->enci.pixel_start,
939 priv->io_base + _REG(ENCI_VFIFO2VD_PIXEL_START));
940 writel_relaxed(vmode->enci.pixel_end,
941 priv->io_base + _REG(ENCI_VFIFO2VD_PIXEL_END));
942
943 writel_relaxed(vmode->enci.top_field_line_start,
944 priv->io_base + _REG(ENCI_VFIFO2VD_LINE_TOP_START));
945 writel_relaxed(vmode->enci.top_field_line_end,
946 priv->io_base + _REG(ENCI_VFIFO2VD_LINE_TOP_END));
947
948 writel_relaxed(vmode->enci.bottom_field_line_start,
949 priv->io_base + _REG(ENCI_VFIFO2VD_LINE_BOT_START));
950 writel_relaxed(vmode->enci.bottom_field_line_end,
951 priv->io_base + _REG(ENCI_VFIFO2VD_LINE_BOT_END));
952
953 /* Select ENCI for VIU */
954 meson_vpp_setup_mux(priv, MESON_VIU_VPP_MUX_ENCI);
955
956 /* Interlace video enable */
957 writel_relaxed(1, priv->io_base + _REG(ENCI_VIDEO_EN));
958
959 lines_f0 = mode->vtotal >> 1;
960 lines_f1 = lines_f0 + 1;
961
962 de_h_begin = modulo(readl_relaxed(priv->io_base +
963 _REG(ENCI_VFIFO2VD_PIXEL_START))
964 + venc_hdmi_latency,
965 total_pixels_venc);
966 de_h_end = modulo(de_h_begin + active_pixels_venc,
967 total_pixels_venc);
968
969 writel_relaxed(de_h_begin,
970 priv->io_base + _REG(ENCI_DE_H_BEGIN));
971 writel_relaxed(de_h_end,
972 priv->io_base + _REG(ENCI_DE_H_END));
973
974 de_v_begin_even = readl_relaxed(priv->io_base +
975 _REG(ENCI_VFIFO2VD_LINE_TOP_START));
976 de_v_end_even = de_v_begin_even + mode->vdisplay;
977 de_v_begin_odd = readl_relaxed(priv->io_base +
978 _REG(ENCI_VFIFO2VD_LINE_BOT_START));
979 de_v_end_odd = de_v_begin_odd + mode->vdisplay;
980
981 writel_relaxed(de_v_begin_even,
982 priv->io_base + _REG(ENCI_DE_V_BEGIN_EVEN));
983 writel_relaxed(de_v_end_even,
984 priv->io_base + _REG(ENCI_DE_V_END_EVEN));
985 writel_relaxed(de_v_begin_odd,
986 priv->io_base + _REG(ENCI_DE_V_BEGIN_ODD));
987 writel_relaxed(de_v_end_odd,
988 priv->io_base + _REG(ENCI_DE_V_END_ODD));
989
990 /* Program Hsync timing */
991 hs_begin = de_h_end + front_porch_venc;
992 if (de_h_end + front_porch_venc >= total_pixels_venc) {
993 hs_begin -= total_pixels_venc;
994 vs_adjust = 1;
995 } else {
996 hs_begin = de_h_end + front_porch_venc;
997 vs_adjust = 0;
998 }
999
1000 hs_end = modulo(hs_begin + hsync_pixels_venc,
1001 total_pixels_venc);
1002 writel_relaxed(hs_begin,
1003 priv->io_base + _REG(ENCI_DVI_HSO_BEGIN));
1004 writel_relaxed(hs_end,
1005 priv->io_base + _REG(ENCI_DVI_HSO_END));
1006
1007 /* Program Vsync timing for even field */
1008 if (((de_v_end_odd - 1) + eof_lines + vs_adjust) >= lines_f1) {
1009 vs_bline_evn = (de_v_end_odd - 1)
1010 + eof_lines
1011 + vs_adjust
1012 - lines_f1;
1013 vs_eline_evn = vs_bline_evn + vsync_lines;
1014
1015 writel_relaxed(vs_bline_evn,
1016 priv->io_base + _REG(ENCI_DVI_VSO_BLINE_EVN));
1017
1018 writel_relaxed(vs_eline_evn,
1019 priv->io_base + _REG(ENCI_DVI_VSO_ELINE_EVN));
1020
1021 writel_relaxed(hs_begin,
1022 priv->io_base + _REG(ENCI_DVI_VSO_BEGIN_EVN));
1023 writel_relaxed(hs_begin,
1024 priv->io_base + _REG(ENCI_DVI_VSO_END_EVN));
1025 } else {
1026 vs_bline_odd = (de_v_end_odd - 1)
1027 + eof_lines
1028 + vs_adjust;
1029
1030 writel_relaxed(vs_bline_odd,
1031 priv->io_base + _REG(ENCI_DVI_VSO_BLINE_ODD));
1032
1033 writel_relaxed(hs_begin,
1034 priv->io_base + _REG(ENCI_DVI_VSO_BEGIN_ODD));
1035
1036 if ((vs_bline_odd + vsync_lines) >= lines_f1) {
1037 vs_eline_evn = vs_bline_odd
1038 + vsync_lines
1039 - lines_f1;
1040
1041 writel_relaxed(vs_eline_evn, priv->io_base
1042 + _REG(ENCI_DVI_VSO_ELINE_EVN));
1043
1044 writel_relaxed(hs_begin, priv->io_base
1045 + _REG(ENCI_DVI_VSO_END_EVN));
1046 } else {
1047 vs_eline_odd = vs_bline_odd
1048 + vsync_lines;
1049
1050 writel_relaxed(vs_eline_odd, priv->io_base
1051 + _REG(ENCI_DVI_VSO_ELINE_ODD));
1052
1053 writel_relaxed(hs_begin, priv->io_base
1054 + _REG(ENCI_DVI_VSO_END_ODD));
1055 }
1056 }
1057
1058 /* Program Vsync timing for odd field */
1059 if (((de_v_end_even - 1) + (eof_lines + 1)) >= lines_f0) {
1060 vs_bline_odd = (de_v_end_even - 1)
1061 + (eof_lines + 1)
1062 - lines_f0;
1063 vs_eline_odd = vs_bline_odd + vsync_lines;
1064
1065 writel_relaxed(vs_bline_odd,
1066 priv->io_base + _REG(ENCI_DVI_VSO_BLINE_ODD));
1067
1068 writel_relaxed(vs_eline_odd,
1069 priv->io_base + _REG(ENCI_DVI_VSO_ELINE_ODD));
1070
1071 vso_begin_odd = modulo(hs_begin
1072 + (total_pixels_venc >> 1),
1073 total_pixels_venc);
1074
1075 writel_relaxed(vso_begin_odd,
1076 priv->io_base + _REG(ENCI_DVI_VSO_BEGIN_ODD));
1077 writel_relaxed(vso_begin_odd,
1078 priv->io_base + _REG(ENCI_DVI_VSO_END_ODD));
1079 } else {
1080 vs_bline_evn = (de_v_end_even - 1)
1081 + (eof_lines + 1);
1082
1083 writel_relaxed(vs_bline_evn,
1084 priv->io_base + _REG(ENCI_DVI_VSO_BLINE_EVN));
1085
1086 vso_begin_evn = modulo(hs_begin
1087 + (total_pixels_venc >> 1),
1088 total_pixels_venc);
1089
1090 writel_relaxed(vso_begin_evn, priv->io_base
1091 + _REG(ENCI_DVI_VSO_BEGIN_EVN));
1092
1093 if (vs_bline_evn + vsync_lines >= lines_f0) {
1094 vs_eline_odd = vs_bline_evn
1095 + vsync_lines
1096 - lines_f0;
1097
1098 writel_relaxed(vs_eline_odd, priv->io_base
1099 + _REG(ENCI_DVI_VSO_ELINE_ODD));
1100
1101 writel_relaxed(vso_begin_evn, priv->io_base
1102 + _REG(ENCI_DVI_VSO_END_ODD));
1103 } else {
1104 vs_eline_evn = vs_bline_evn + vsync_lines;
1105
1106 writel_relaxed(vs_eline_evn, priv->io_base
1107 + _REG(ENCI_DVI_VSO_ELINE_EVN));
1108
1109 writel_relaxed(vso_begin_evn, priv->io_base
1110 + _REG(ENCI_DVI_VSO_END_EVN));
1111 }
1112 }
1113 } else {
1114 writel_relaxed(vmode->encp.dvi_settings,
1115 priv->io_base + _REG(VENC_DVI_SETTING));
1116 writel_relaxed(vmode->encp.video_mode,
1117 priv->io_base + _REG(ENCP_VIDEO_MODE));
1118 writel_relaxed(vmode->encp.video_mode_adv,
1119 priv->io_base + _REG(ENCP_VIDEO_MODE_ADV));
1120 if (vmode->encp.video_prog_mode_present)
1121 writel_relaxed(vmode->encp.video_prog_mode,
1122 priv->io_base + _REG(VENC_VIDEO_PROG_MODE));
1123 if (vmode->encp.video_sync_mode_present)
1124 writel_relaxed(vmode->encp.video_sync_mode,
1125 priv->io_base + _REG(ENCP_VIDEO_SYNC_MODE));
1126 if (vmode->encp.video_yc_dly_present)
1127 writel_relaxed(vmode->encp.video_yc_dly,
1128 priv->io_base + _REG(ENCP_VIDEO_YC_DLY));
1129 if (vmode->encp.video_rgb_ctrl_present)
1130 writel_relaxed(vmode->encp.video_rgb_ctrl,
1131 priv->io_base + _REG(ENCP_VIDEO_RGB_CTRL));
1132 if (vmode->encp.video_filt_ctrl_present)
1133 writel_relaxed(vmode->encp.video_filt_ctrl,
1134 priv->io_base + _REG(ENCP_VIDEO_FILT_CTRL));
1135 if (vmode->encp.video_ofld_voav_ofst_present)
1136 writel_relaxed(vmode->encp.video_ofld_voav_ofst,
1137 priv->io_base
1138 + _REG(ENCP_VIDEO_OFLD_VOAV_OFST));
1139 writel_relaxed(vmode->encp.yfp1_htime,
1140 priv->io_base + _REG(ENCP_VIDEO_YFP1_HTIME));
1141 writel_relaxed(vmode->encp.yfp2_htime,
1142 priv->io_base + _REG(ENCP_VIDEO_YFP2_HTIME));
1143 writel_relaxed(vmode->encp.max_pxcnt,
1144 priv->io_base + _REG(ENCP_VIDEO_MAX_PXCNT));
1145 writel_relaxed(vmode->encp.hspuls_begin,
1146 priv->io_base + _REG(ENCP_VIDEO_HSPULS_BEGIN));
1147 writel_relaxed(vmode->encp.hspuls_end,
1148 priv->io_base + _REG(ENCP_VIDEO_HSPULS_END));
1149 writel_relaxed(vmode->encp.hspuls_switch,
1150 priv->io_base + _REG(ENCP_VIDEO_HSPULS_SWITCH));
1151 writel_relaxed(vmode->encp.vspuls_begin,
1152 priv->io_base + _REG(ENCP_VIDEO_VSPULS_BEGIN));
1153 writel_relaxed(vmode->encp.vspuls_end,
1154 priv->io_base + _REG(ENCP_VIDEO_VSPULS_END));
1155 writel_relaxed(vmode->encp.vspuls_bline,
1156 priv->io_base + _REG(ENCP_VIDEO_VSPULS_BLINE));
1157 writel_relaxed(vmode->encp.vspuls_eline,
1158 priv->io_base + _REG(ENCP_VIDEO_VSPULS_ELINE));
1159 if (vmode->encp.eqpuls_begin_present)
1160 writel_relaxed(vmode->encp.eqpuls_begin,
1161 priv->io_base + _REG(ENCP_VIDEO_EQPULS_BEGIN));
1162 if (vmode->encp.eqpuls_end_present)
1163 writel_relaxed(vmode->encp.eqpuls_end,
1164 priv->io_base + _REG(ENCP_VIDEO_EQPULS_END));
1165 if (vmode->encp.eqpuls_bline_present)
1166 writel_relaxed(vmode->encp.eqpuls_bline,
1167 priv->io_base + _REG(ENCP_VIDEO_EQPULS_BLINE));
1168 if (vmode->encp.eqpuls_eline_present)
1169 writel_relaxed(vmode->encp.eqpuls_eline,
1170 priv->io_base + _REG(ENCP_VIDEO_EQPULS_ELINE));
1171 writel_relaxed(vmode->encp.havon_begin,
1172 priv->io_base + _REG(ENCP_VIDEO_HAVON_BEGIN));
1173 writel_relaxed(vmode->encp.havon_end,
1174 priv->io_base + _REG(ENCP_VIDEO_HAVON_END));
1175 writel_relaxed(vmode->encp.vavon_bline,
1176 priv->io_base + _REG(ENCP_VIDEO_VAVON_BLINE));
1177 writel_relaxed(vmode->encp.vavon_eline,
1178 priv->io_base + _REG(ENCP_VIDEO_VAVON_ELINE));
1179 writel_relaxed(vmode->encp.hso_begin,
1180 priv->io_base + _REG(ENCP_VIDEO_HSO_BEGIN));
1181 writel_relaxed(vmode->encp.hso_end,
1182 priv->io_base + _REG(ENCP_VIDEO_HSO_END));
1183 writel_relaxed(vmode->encp.vso_begin,
1184 priv->io_base + _REG(ENCP_VIDEO_VSO_BEGIN));
1185 writel_relaxed(vmode->encp.vso_end,
1186 priv->io_base + _REG(ENCP_VIDEO_VSO_END));
1187 writel_relaxed(vmode->encp.vso_bline,
1188 priv->io_base + _REG(ENCP_VIDEO_VSO_BLINE));
1189 if (vmode->encp.vso_eline_present)
1190 writel_relaxed(vmode->encp.vso_eline,
1191 priv->io_base + _REG(ENCP_VIDEO_VSO_ELINE));
1192 if (vmode->encp.sy_val_present)
1193 writel_relaxed(vmode->encp.sy_val,
1194 priv->io_base + _REG(ENCP_VIDEO_SY_VAL));
1195 if (vmode->encp.sy2_val_present)
1196 writel_relaxed(vmode->encp.sy2_val,
1197 priv->io_base + _REG(ENCP_VIDEO_SY2_VAL));
1198 writel_relaxed(vmode->encp.max_lncnt,
1199 priv->io_base + _REG(ENCP_VIDEO_MAX_LNCNT));
1200
1201 writel_relaxed(1, priv->io_base + _REG(ENCP_VIDEO_EN));
1202
 1203 /* Set DE signal polarity to active high */
1204 writel_bits_relaxed(BIT(14), BIT(14),
1205 priv->io_base + _REG(ENCP_VIDEO_MODE));
1206
1207 /* Program DE timing */
1208 de_h_begin = modulo(readl_relaxed(priv->io_base +
1209 _REG(ENCP_VIDEO_HAVON_BEGIN))
1210 + venc_hdmi_latency,
1211 total_pixels_venc);
1212 de_h_end = modulo(de_h_begin + active_pixels_venc,
1213 total_pixels_venc);
1214
1215 writel_relaxed(de_h_begin,
1216 priv->io_base + _REG(ENCP_DE_H_BEGIN));
1217 writel_relaxed(de_h_end,
1218 priv->io_base + _REG(ENCP_DE_H_END));
1219
1220 /* Program DE timing for even field */
1221 de_v_begin_even = readl_relaxed(priv->io_base
1222 + _REG(ENCP_VIDEO_VAVON_BLINE));
1223 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
1224 de_v_end_even = de_v_begin_even +
1225 (mode->vdisplay / 2);
1226 else
1227 de_v_end_even = de_v_begin_even + mode->vdisplay;
1228
1229 writel_relaxed(de_v_begin_even,
1230 priv->io_base + _REG(ENCP_DE_V_BEGIN_EVEN));
1231 writel_relaxed(de_v_end_even,
1232 priv->io_base + _REG(ENCP_DE_V_END_EVEN));
1233
1234 /* Program DE timing for odd field if needed */
1235 if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
1236 unsigned int ofld_voav_ofst =
1237 readl_relaxed(priv->io_base +
1238 _REG(ENCP_VIDEO_OFLD_VOAV_OFST));
1239 de_v_begin_odd = to_signed((ofld_voav_ofst & 0xf0) >> 4)
1240 + de_v_begin_even
1241 + ((mode->vtotal - 1) / 2);
1242 de_v_end_odd = de_v_begin_odd + (mode->vdisplay / 2);
1243
1244 writel_relaxed(de_v_begin_odd,
1245 priv->io_base + _REG(ENCP_DE_V_BEGIN_ODD));
1246 writel_relaxed(de_v_end_odd,
1247 priv->io_base + _REG(ENCP_DE_V_END_ODD));
1248 }
1249
1250 /* Program Hsync timing */
1251 if ((de_h_end + front_porch_venc) >= total_pixels_venc) {
1252 hs_begin = de_h_end
1253 + front_porch_venc
1254 - total_pixels_venc;
1255 vs_adjust = 1;
1256 } else {
1257 hs_begin = de_h_end
1258 + front_porch_venc;
1259 vs_adjust = 0;
1260 }
1261
1262 hs_end = modulo(hs_begin + hsync_pixels_venc,
1263 total_pixels_venc);
1264
1265 writel_relaxed(hs_begin,
1266 priv->io_base + _REG(ENCP_DVI_HSO_BEGIN));
1267 writel_relaxed(hs_end,
1268 priv->io_base + _REG(ENCP_DVI_HSO_END));
1269
1270 /* Program Vsync timing for even field */
1271 if (de_v_begin_even >=
1272 (sof_lines + vsync_lines + (1 - vs_adjust)))
1273 vs_bline_evn = de_v_begin_even
1274 - sof_lines
1275 - vsync_lines
1276 - (1 - vs_adjust);
1277 else
1278 vs_bline_evn = mode->vtotal
1279 + de_v_begin_even
1280 - sof_lines
1281 - vsync_lines
1282 - (1 - vs_adjust);
1283
1284 vs_eline_evn = modulo(vs_bline_evn + vsync_lines,
1285 mode->vtotal);
1286
1287 writel_relaxed(vs_bline_evn,
1288 priv->io_base + _REG(ENCP_DVI_VSO_BLINE_EVN));
1289 writel_relaxed(vs_eline_evn,
1290 priv->io_base + _REG(ENCP_DVI_VSO_ELINE_EVN));
1291
1292 vso_begin_evn = hs_begin;
1293 writel_relaxed(vso_begin_evn,
1294 priv->io_base + _REG(ENCP_DVI_VSO_BEGIN_EVN));
1295 writel_relaxed(vso_begin_evn,
1296 priv->io_base + _REG(ENCP_DVI_VSO_END_EVN));
1297
1298 /* Program Vsync timing for odd field if needed */
1299 if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
1300 vs_bline_odd = (de_v_begin_odd - 1)
1301 - sof_lines
1302 - vsync_lines;
1303 vs_eline_odd = (de_v_begin_odd - 1)
1304 - vsync_lines;
1305 vso_begin_odd = modulo(hs_begin
1306 + (total_pixels_venc >> 1),
1307 total_pixels_venc);
1308
1309 writel_relaxed(vs_bline_odd,
1310 priv->io_base + _REG(ENCP_DVI_VSO_BLINE_ODD));
1311 writel_relaxed(vs_eline_odd,
1312 priv->io_base + _REG(ENCP_DVI_VSO_ELINE_ODD));
1313 writel_relaxed(vso_begin_odd,
1314 priv->io_base + _REG(ENCP_DVI_VSO_BEGIN_ODD));
1315 writel_relaxed(vso_begin_odd,
1316 priv->io_base + _REG(ENCP_DVI_VSO_END_ODD));
1317 }
1318
1319 /* Select ENCP for VIU */
1320 meson_vpp_setup_mux(priv, MESON_VIU_VPP_MUX_ENCP);
1321 }
1322
1323 writel_relaxed((use_enci ? 1 : 2) |
1324 (mode->flags & DRM_MODE_FLAG_PHSYNC ? 1 << 2 : 0) |
1325 (mode->flags & DRM_MODE_FLAG_PVSYNC ? 1 << 3 : 0) |
1326 4 << 5 |
1327 (venc_repeat ? 1 << 8 : 0) |
1328 (hdmi_repeat ? 1 << 12 : 0),
1329 priv->io_base + _REG(VPU_HDMI_SETTING));
1330
1331 priv->venc.hdmi_repeat = hdmi_repeat;
1332 priv->venc.venc_repeat = venc_repeat;
1333 priv->venc.hdmi_use_enci = use_enci;
1334
1335 priv->venc.current_mode = MESON_VENC_MODE_HDMI;
1336}
1337EXPORT_SYMBOL_GPL(meson_venc_hdmi_mode_set);
1338
94void meson_venci_cvbs_mode_set(struct meson_drm *priv, 1339void meson_venci_cvbs_mode_set(struct meson_drm *priv,
95 struct meson_cvbs_enci_mode *mode) 1340 struct meson_cvbs_enci_mode *mode)
96{ 1341{
@@ -223,9 +1468,6 @@ void meson_venci_cvbs_mode_set(struct meson_drm *priv,
223 writel_relaxed(mode->analog_sync_adj, 1468 writel_relaxed(mode->analog_sync_adj,
224 priv->io_base + _REG(ENCI_SYNC_ADJ)); 1469 priv->io_base + _REG(ENCI_SYNC_ADJ));
225 1470
226 /* Setup 27MHz vclk2 for ENCI and VDAC */
227 meson_vclk_setup(priv, MESON_VCLK_TARGET_CVBS, MESON_VCLK_CVBS);
228
229 priv->venc.current_mode = mode->mode_tag; 1471 priv->venc.current_mode = mode->mode_tag;
230} 1472}
231 1473
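All of the DE/HSYNC/VSYNC begin/end values programmed above are computed modulo total_pixels_venc (or mode->vtotal for line counts) so that positions landing past the end of a scanline or frame wrap back to the start. A minimal sketch of that wrap-around arithmetic, assuming a helper equivalent to the modulo() used throughout this function; the helper and example names here are illustrative, not from this patch:

/* Illustrative sketch, not part of the patch */
static unsigned int venc_modulo(unsigned int val, unsigned int mod)
{
        return (val >= mod) ? val - mod : val;
}

/*
 * Example: an hsync start computed past the end of the scanline wraps
 * to its beginning; the real code also records whether the wrap
 * happened (vs_adjust) so the vsync start line can be shifted to match.
 */
static unsigned int example_hs_begin(unsigned int de_h_end,
                                     unsigned int front_porch_venc,
                                     unsigned int total_pixels_venc)
{
        return venc_modulo(de_h_end + front_porch_venc, total_pixels_venc);
}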
diff --git a/drivers/gpu/drm/meson/meson_venc.h b/drivers/gpu/drm/meson/meson_venc.h
index 77d4a7d82c44..a1b96e898c14 100644
--- a/drivers/gpu/drm/meson/meson_venc.h
+++ b/drivers/gpu/drm/meson/meson_venc.h
@@ -30,6 +30,7 @@ enum {
30 MESON_VENC_MODE_NONE = 0, 30 MESON_VENC_MODE_NONE = 0,
31 MESON_VENC_MODE_CVBS_PAL, 31 MESON_VENC_MODE_CVBS_PAL,
32 MESON_VENC_MODE_CVBS_NTSC, 32 MESON_VENC_MODE_CVBS_NTSC,
33 MESON_VENC_MODE_HDMI,
33}; 34};
34 35
35struct meson_cvbs_enci_mode { 36struct meson_cvbs_enci_mode {
@@ -56,12 +57,18 @@ struct meson_cvbs_enci_mode {
56 unsigned int analog_sync_adj; 57 unsigned int analog_sync_adj;
57}; 58};
58 59
60/* HDMI Clock parameters */
61bool meson_venc_hdmi_supported_vic(int vic);
62bool meson_venc_hdmi_venc_repeat(int vic);
63
59/* CVBS Timings and Parameters */ 64/* CVBS Timings and Parameters */
60extern struct meson_cvbs_enci_mode meson_cvbs_enci_pal; 65extern struct meson_cvbs_enci_mode meson_cvbs_enci_pal;
61extern struct meson_cvbs_enci_mode meson_cvbs_enci_ntsc; 66extern struct meson_cvbs_enci_mode meson_cvbs_enci_ntsc;
62 67
63void meson_venci_cvbs_mode_set(struct meson_drm *priv, 68void meson_venci_cvbs_mode_set(struct meson_drm *priv,
64 struct meson_cvbs_enci_mode *mode); 69 struct meson_cvbs_enci_mode *mode);
70void meson_venc_hdmi_mode_set(struct meson_drm *priv, int vic,
71 struct drm_display_mode *mode);
65unsigned int meson_venci_get_field(struct meson_drm *priv); 72unsigned int meson_venci_get_field(struct meson_drm *priv);
66 73
67void meson_venc_enable_vsync(struct meson_drm *priv); 74void meson_venc_enable_vsync(struct meson_drm *priv);
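A hedged sketch of how an encoder might consume the APIs added to this header; everything except the meson_venc_* calls (the VIC lookup via drm_match_cea_mode() and the wrapper name) is illustrative, not from this patch:

/* Illustrative sketch: feed a CEA mode into the new VENC HDMI API */
static void example_hdmi_mode_set(struct meson_drm *priv,
                                  struct drm_display_mode *mode)
{
        int vic = drm_match_cea_mode(mode);     /* CEA-861 VIC, 0 if none */

        if (meson_venc_hdmi_supported_vic(vic))
                meson_venc_hdmi_mode_set(priv, vic, mode);
}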
diff --git a/drivers/gpu/drm/meson/meson_venc_cvbs.c b/drivers/gpu/drm/meson/meson_venc_cvbs.c
index a2bcc70a03ef..00775b397dba 100644
--- a/drivers/gpu/drm/meson/meson_venc_cvbs.c
+++ b/drivers/gpu/drm/meson/meson_venc_cvbs.c
@@ -32,6 +32,7 @@
32 32
33#include "meson_venc_cvbs.h" 33#include "meson_venc_cvbs.h"
34#include "meson_venc.h" 34#include "meson_venc.h"
35#include "meson_vclk.h"
35#include "meson_registers.h" 36#include "meson_registers.h"
36 37
37/* HHI VDAC Registers */ 38/* HHI VDAC Registers */
@@ -194,14 +195,20 @@ static void meson_venc_cvbs_encoder_mode_set(struct drm_encoder *encoder,
194{ 195{
195 struct meson_venc_cvbs *meson_venc_cvbs = 196 struct meson_venc_cvbs *meson_venc_cvbs =
196 encoder_to_meson_venc_cvbs(encoder); 197 encoder_to_meson_venc_cvbs(encoder);
198 struct meson_drm *priv = meson_venc_cvbs->priv;
197 int i; 199 int i;
198 200
199 for (i = 0; i < MESON_CVBS_MODES_COUNT; ++i) { 201 for (i = 0; i < MESON_CVBS_MODES_COUNT; ++i) {
200 struct meson_cvbs_mode *meson_mode = &meson_cvbs_modes[i]; 202 struct meson_cvbs_mode *meson_mode = &meson_cvbs_modes[i];
201 203
202 if (drm_mode_equal(mode, &meson_mode->mode)) { 204 if (drm_mode_equal(mode, &meson_mode->mode)) {
203 meson_venci_cvbs_mode_set(meson_venc_cvbs->priv, 205 meson_venci_cvbs_mode_set(priv,
204 meson_mode->enci); 206 meson_mode->enci);
207
208 /* Setup 27MHz vclk2 for ENCI and VDAC */
209 meson_vclk_setup(priv, MESON_VCLK_TARGET_CVBS,
210 MESON_VCLK_CVBS, MESON_VCLK_CVBS,
211 MESON_VCLK_CVBS, true);
205 break; 212 break;
206 } 213 }
207 } 214 }
@@ -217,25 +224,14 @@ static const struct drm_encoder_helper_funcs
217 224
218static bool meson_venc_cvbs_connector_is_available(struct meson_drm *priv) 225static bool meson_venc_cvbs_connector_is_available(struct meson_drm *priv)
219{ 226{
220 struct device_node *ep, *remote; 227 struct device_node *remote;
221 228
222 /* CVBS VDAC output is on the first port, first endpoint */ 229 remote = of_graph_get_remote_node(priv->dev->of_node, 0, 0);
223 ep = of_graph_get_endpoint_by_regs(priv->dev->of_node, 0, 0); 230 if (!remote)
224 if (!ep)
225 return false; 231 return false;
226 232
227
228 /* If the endpoint node exists, consider it enabled */
229 remote = of_graph_get_remote_port(ep);
230 if (remote) {
231 of_node_put(ep);
232 return true;
233 }
234
235 of_node_put(ep);
236 of_node_put(remote); 233 of_node_put(remote);
237 234 return true;
238 return false;
239} 235}
240 236
241int meson_venc_cvbs_create(struct meson_drm *priv) 237int meson_venc_cvbs_create(struct meson_drm *priv)
@@ -248,7 +244,7 @@ int meson_venc_cvbs_create(struct meson_drm *priv)
248 244
249 if (!meson_venc_cvbs_connector_is_available(priv)) { 245 if (!meson_venc_cvbs_connector_is_available(priv)) {
250 dev_info(drm->dev, "CVBS Output connector not available\n"); 246 dev_info(drm->dev, "CVBS Output connector not available\n");
251 return -ENODEV; 247 return 0;
252 } 248 }
253 249
254 meson_venc_cvbs = devm_kzalloc(priv->dev, sizeof(*meson_venc_cvbs), 250 meson_venc_cvbs = devm_kzalloc(priv->dev, sizeof(*meson_venc_cvbs),
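of_graph_get_remote_node(node, port, endpoint) collapses the old get-endpoint / get-remote-port / of_node_put dance into one call that returns the remote device node with a reference held, or NULL. A minimal sketch of the availability check as rewritten above:

#include <linux/of_graph.h>

/* true if port 0, endpoint 0 of @np has a connected remote node */
static bool example_output_connected(struct device_node *np)
{
        struct device_node *remote = of_graph_get_remote_node(np, 0, 0);

        if (!remote)
                return false;

        of_node_put(remote);    /* drop the reference the helper took */
        return true;
}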
diff --git a/drivers/gpu/drm/meson/meson_viu.c b/drivers/gpu/drm/meson/meson_viu.c
index a6de8ba7af19..6bcfa527c180 100644
--- a/drivers/gpu/drm/meson/meson_viu.c
+++ b/drivers/gpu/drm/meson/meson_viu.c
@@ -28,9 +28,12 @@
28#include "meson_canvas.h" 28#include "meson_canvas.h"
29#include "meson_registers.h" 29#include "meson_registers.h"
30 30
31/* 31/**
32 * DOC: Video Input Unit
33 *
32 * VIU Handles the Pixel scanout and the basic Colorspace conversions 34 * VIU Handles the Pixel scanout and the basic Colorspace conversions
33 * We handle the following features : 35 * We handle the following features :
36 *
34 * - OSD1 RGB565/RGB888/xRGB8888 scanout 37 * - OSD1 RGB565/RGB888/xRGB8888 scanout
35 * - RGB conversion to x/cb/cr 38 * - RGB conversion to x/cb/cr
36 * - Progressive or Interlace buffer scanout 39 * - Progressive or Interlace buffer scanout
@@ -38,6 +41,7 @@
38 * - HDR OSD matrix for GXL/GXM 41 * - HDR OSD matrix for GXL/GXM
39 * 42 *
40 * What is missing : 43 * What is missing :
44 *
41 * - BGR888/xBGR8888/BGRx8888/BGRx8888 modes 45 * - BGR888/xBGR8888/BGRx8888/BGRx8888 modes
42 * - YUV4:2:2 Y0CbY1Cr scanout 46 * - YUV4:2:2 Y0CbY1Cr scanout
43 * - Conversion to YUV 4:4:4 from 4:2:2 input 47 * - Conversion to YUV 4:4:4 from 4:2:2 input
diff --git a/drivers/gpu/drm/meson/meson_vpp.c b/drivers/gpu/drm/meson/meson_vpp.c
index 671909d8672e..27356f81a0ab 100644
--- a/drivers/gpu/drm/meson/meson_vpp.c
+++ b/drivers/gpu/drm/meson/meson_vpp.c
@@ -25,16 +25,20 @@
25#include "meson_vpp.h" 25#include "meson_vpp.h"
26#include "meson_registers.h" 26#include "meson_registers.h"
27 27
28/* 28/**
29 * DOC: Video Post Processing
30 *
29 * VPP Handles all the Post Processing after the Scanout from the VIU 31 * VPP Handles all the Post Processing after the Scanout from the VIU
30 * We handle the following post processings : 32 * We handle the following post processings :
31 * - Postblend : Blends the OSD1 only 33 *
34 * - Postblend, Blends the OSD1 only
 32 * We exclude OSD2, VS1, VS2 and Preblend output 35 * We exclude OSD2, VS1, VS2 and Preblend output
33 * - Vertical OSD Scaler for OSD1 only, we disable vertical scaler and 36 * - Vertical OSD Scaler for OSD1 only, we disable vertical scaler and
34 * use it only for interlace scanout 37 * use it only for interlace scanout
35 * - Intermediate FIFO with default Amlogic values 38 * - Intermediate FIFO with default Amlogic values
36 * 39 *
37 * What is missing : 40 * What is missing :
41 *
38 * - Preblend for video overlay pre-scaling 42 * - Preblend for video overlay pre-scaling
39 * - OSD2 support for cursor framebuffer 43 * - OSD2 support for cursor framebuffer
40 * - Video pre-scaling before postblend 44 * - Video pre-scaling before postblend
diff --git a/drivers/gpu/drm/meson/meson_vpp.h b/drivers/gpu/drm/meson/meson_vpp.h
index ede3b26e0f22..815177cc7dfd 100644
--- a/drivers/gpu/drm/meson/meson_vpp.h
+++ b/drivers/gpu/drm/meson/meson_vpp.h
@@ -23,6 +23,8 @@
23 23
24/* Mux VIU/VPP to ENCI */ 24/* Mux VIU/VPP to ENCI */
25#define MESON_VIU_VPP_MUX_ENCI 0x5 25#define MESON_VIU_VPP_MUX_ENCI 0x5
26/* Mux VIU/VPP to ENCP */
27#define MESON_VIU_VPP_MUX_ENCP 0xA
26 28
27void meson_vpp_setup_mux(struct meson_drm *priv, unsigned int mux); 29void meson_vpp_setup_mux(struct meson_drm *priv, unsigned int mux);
28 30
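The new MESON_VIU_VPP_MUX_ENCP value routes the VIU/VPP output to the progressive encoder rather than ENCI. A sketch of selecting between the two, using only the API declared in this header (the wrapper itself is illustrative):

/* Illustrative: pick the encoder mux for interlaced vs progressive output */
static void example_select_encoder(struct meson_drm *priv, bool use_enci)
{
        meson_vpp_setup_mux(priv, use_enci ? MESON_VIU_VPP_MUX_ENCI
                                           : MESON_VIU_VPP_MUX_ENCP);
}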
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index f2e9b2bc18a5..adb411a078e8 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -1393,7 +1393,8 @@ static void mga_crtc_commit(struct drm_crtc *crtc)
1393 * but it's a requirement that we provide the function 1393 * but it's a requirement that we provide the function
1394 */ 1394 */
1395static int mga_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, 1395static int mga_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
1396 u16 *blue, uint32_t size) 1396 u16 *blue, uint32_t size,
1397 struct drm_modeset_acquire_ctx *ctx)
1397{ 1398{
1398 struct mga_crtc *mga_crtc = to_mga_crtc(crtc); 1399 struct mga_crtc *mga_crtc = to_mga_crtc(crtc);
1399 int i; 1400 int i;
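The added struct drm_modeset_acquire_ctx * argument tracks a DRM core change that threads the modeset locking context through the legacy gamma_set path. A sketch of the callback shape drivers now implement (names and body illustrative):

static int example_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
                             u16 *blue, uint32_t size,
                             struct drm_modeset_acquire_ctx *ctx)
{
        /* program the hardware LUT from red/green/blue[0..size-1] */
        return 0;
}

static const struct drm_crtc_funcs example_crtc_funcs = {
        .gamma_set = example_gamma_set,
        /* other callbacks elided */
};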
diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c
index 657598bb1e6b..565a217b46f2 100644
--- a/drivers/gpu/drm/mgag200/mgag200_ttm.c
+++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c
@@ -236,6 +236,7 @@ struct ttm_bo_driver mgag200_bo_driver = {
236 .verify_access = mgag200_bo_verify_access, 236 .verify_access = mgag200_bo_verify_access,
237 .io_mem_reserve = &mgag200_ttm_io_mem_reserve, 237 .io_mem_reserve = &mgag200_ttm_io_mem_reserve,
238 .io_mem_free = &mgag200_ttm_io_mem_free, 238 .io_mem_free = &mgag200_ttm_io_mem_free,
239 .io_mem_pfn = ttm_bo_default_io_mem_pfn,
239}; 240};
240 241
241int mgag200_mm_init(struct mga_device *mdev) 242int mgag200_mm_init(struct mga_device *mdev)
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index 39055362da95..5241ac8803ba 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -40,6 +40,7 @@ msm-y := \
40 mdp/mdp5/mdp5_mdss.o \ 40 mdp/mdp5/mdp5_mdss.o \
41 mdp/mdp5/mdp5_kms.o \ 41 mdp/mdp5/mdp5_kms.o \
42 mdp/mdp5/mdp5_pipe.o \ 42 mdp/mdp5/mdp5_pipe.o \
43 mdp/mdp5/mdp5_mixer.o \
43 mdp/mdp5/mdp5_plane.o \ 44 mdp/mdp5/mdp5_plane.o \
44 mdp/mdp5/mdp5_smp.o \ 45 mdp/mdp5/mdp5_smp.o \
45 msm_atomic.o \ 46 msm_atomic.o \
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
index b999349b7d2d..7fd77958a436 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
@@ -412,10 +412,8 @@ static const unsigned int a3xx_registers[] = {
412#ifdef CONFIG_DEBUG_FS 412#ifdef CONFIG_DEBUG_FS
413static void a3xx_show(struct msm_gpu *gpu, struct seq_file *m) 413static void a3xx_show(struct msm_gpu *gpu, struct seq_file *m)
414{ 414{
415 gpu->funcs->pm_resume(gpu);
416 seq_printf(m, "status: %08x\n", 415 seq_printf(m, "status: %08x\n",
417 gpu_read(gpu, REG_A3XX_RBBM_STATUS)); 416 gpu_read(gpu, REG_A3XX_RBBM_STATUS));
418 gpu->funcs->pm_suspend(gpu);
419 adreno_show(gpu, m); 417 adreno_show(gpu, m);
420} 418}
421#endif 419#endif
diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
index 511bc855cc7f..dfe0eceaae3b 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
@@ -456,12 +456,8 @@ static const unsigned int a4xx_registers[] = {
456#ifdef CONFIG_DEBUG_FS 456#ifdef CONFIG_DEBUG_FS
457static void a4xx_show(struct msm_gpu *gpu, struct seq_file *m) 457static void a4xx_show(struct msm_gpu *gpu, struct seq_file *m)
458{ 458{
459 gpu->funcs->pm_resume(gpu);
460
461 seq_printf(m, "status: %08x\n", 459 seq_printf(m, "status: %08x\n",
462 gpu_read(gpu, REG_A4XX_RBBM_STATUS)); 460 gpu_read(gpu, REG_A4XX_RBBM_STATUS));
463 gpu->funcs->pm_suspend(gpu);
464
465 adreno_show(gpu, m); 461 adreno_show(gpu, m);
466 462
467} 463}
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index 4414cf73735d..31a9bceed32c 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -1,4 +1,4 @@
1/* Copyright (c) 2016 The Linux Foundation. All rights reserved. 1/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
2 * 2 *
3 * This program is free software; you can redistribute it and/or modify 3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and 4 * it under the terms of the GNU General Public License version 2 and
@@ -534,7 +534,7 @@ static void a5xx_destroy(struct msm_gpu *gpu)
534 } 534 }
535 535
536 if (a5xx_gpu->gpmu_bo) { 536 if (a5xx_gpu->gpmu_bo) {
537 if (a5xx_gpu->gpmu_bo) 537 if (a5xx_gpu->gpmu_iova)
538 msm_gem_put_iova(a5xx_gpu->gpmu_bo, gpu->id); 538 msm_gem_put_iova(a5xx_gpu->gpmu_bo, gpu->id);
539 drm_gem_object_unreference_unlocked(a5xx_gpu->gpmu_bo); 539 drm_gem_object_unreference_unlocked(a5xx_gpu->gpmu_bo);
540 } 540 }
@@ -638,10 +638,8 @@ static void a5xx_cp_err_irq(struct msm_gpu *gpu)
638 } 638 }
639} 639}
640 640
641static void a5xx_rbbm_err_irq(struct msm_gpu *gpu) 641static void a5xx_rbbm_err_irq(struct msm_gpu *gpu, u32 status)
642{ 642{
643 u32 status = gpu_read(gpu, REG_A5XX_RBBM_INT_0_STATUS);
644
645 if (status & A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR) { 643 if (status & A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR) {
646 u32 val = gpu_read(gpu, REG_A5XX_RBBM_AHB_ERROR_STATUS); 644 u32 val = gpu_read(gpu, REG_A5XX_RBBM_AHB_ERROR_STATUS);
647 645
@@ -653,6 +651,10 @@ static void a5xx_rbbm_err_irq(struct msm_gpu *gpu)
653 651
654 /* Clear the error */ 652 /* Clear the error */
655 gpu_write(gpu, REG_A5XX_RBBM_AHB_CMD, (1 << 4)); 653 gpu_write(gpu, REG_A5XX_RBBM_AHB_CMD, (1 << 4));
654
655 /* Clear the interrupt */
656 gpu_write(gpu, REG_A5XX_RBBM_INT_CLEAR_CMD,
657 A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR);
656 } 658 }
657 659
658 if (status & A5XX_RBBM_INT_0_MASK_RBBM_TRANSFER_TIMEOUT) 660 if (status & A5XX_RBBM_INT_0_MASK_RBBM_TRANSFER_TIMEOUT)
@@ -704,10 +706,16 @@ static irqreturn_t a5xx_irq(struct msm_gpu *gpu)
704{ 706{
705 u32 status = gpu_read(gpu, REG_A5XX_RBBM_INT_0_STATUS); 707 u32 status = gpu_read(gpu, REG_A5XX_RBBM_INT_0_STATUS);
706 708
707 gpu_write(gpu, REG_A5XX_RBBM_INT_CLEAR_CMD, status); 709 /*
710 * Clear all the interrupts except RBBM_AHB_ERROR - if we clear it
711 * before the source is cleared the interrupt will storm.
712 */
713 gpu_write(gpu, REG_A5XX_RBBM_INT_CLEAR_CMD,
714 status & ~A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR);
708 715
716 /* Pass status to a5xx_rbbm_err_irq because we've already cleared it */
709 if (status & RBBM_ERROR_MASK) 717 if (status & RBBM_ERROR_MASK)
710 a5xx_rbbm_err_irq(gpu); 718 a5xx_rbbm_err_irq(gpu, status);
711 719
712 if (status & A5XX_RBBM_INT_0_MASK_CP_HW_ERROR) 720 if (status & A5XX_RBBM_INT_0_MASK_CP_HW_ERROR)
713 a5xx_cp_err_irq(gpu); 721 a5xx_cp_err_irq(gpu);
@@ -837,12 +845,8 @@ static int a5xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
837#ifdef CONFIG_DEBUG_FS 845#ifdef CONFIG_DEBUG_FS
838static void a5xx_show(struct msm_gpu *gpu, struct seq_file *m) 846static void a5xx_show(struct msm_gpu *gpu, struct seq_file *m)
839{ 847{
840 gpu->funcs->pm_resume(gpu);
841
842 seq_printf(m, "status: %08x\n", 848 seq_printf(m, "status: %08x\n",
843 gpu_read(gpu, REG_A5XX_RBBM_STATUS)); 849 gpu_read(gpu, REG_A5XX_RBBM_STATUS));
844 gpu->funcs->pm_suspend(gpu);
845
846 adreno_show(gpu, m); 850 adreno_show(gpu, m);
847} 851}
848#endif 852#endif
@@ -860,7 +864,9 @@ static const struct adreno_gpu_funcs funcs = {
860 .idle = a5xx_idle, 864 .idle = a5xx_idle,
861 .irq = a5xx_irq, 865 .irq = a5xx_irq,
862 .destroy = a5xx_destroy, 866 .destroy = a5xx_destroy,
867#ifdef CONFIG_DEBUG_FS
863 .show = a5xx_show, 868 .show = a5xx_show,
869#endif
864 }, 870 },
865 .get_timestamp = a5xx_get_timestamp, 871 .get_timestamp = a5xx_get_timestamp,
866}; 872};
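The reordering in a5xx_irq() matters because an interrupt whose source is still asserted re-fires as soon as it is acked: the handler now acks everything except RBBM_AHB_ERROR immediately, and that bit is only cleared once the AHB error source itself has been cleared. Condensed sketch of the pattern, reusing the registers and helpers from the patch:

/* Sketch: clear the error source before acking its interrupt */
static void example_a5xx_irq(struct msm_gpu *gpu)
{
        u32 status = gpu_read(gpu, REG_A5XX_RBBM_INT_0_STATUS);

        /* safe to ack these right away */
        gpu_write(gpu, REG_A5XX_RBBM_INT_CLEAR_CMD,
                  status & ~A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR);

        if (status & A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR) {
                /* 1: clear the error source ... */
                gpu_write(gpu, REG_A5XX_RBBM_AHB_CMD, (1 << 4));
                /* 2: ... only then ack the interrupt, avoiding a storm */
                gpu_write(gpu, REG_A5XX_RBBM_INT_CLEAR_CMD,
                          A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR);
        }
}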
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
index ece39b16a864..c0fa5d1c75ff 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -2,7 +2,7 @@
2 * Copyright (C) 2013-2014 Red Hat 2 * Copyright (C) 2013-2014 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com> 3 * Author: Rob Clark <robdclark@gmail.com>
4 * 4 *
5 * Copyright (c) 2014 The Linux Foundation. All rights reserved. 5 * Copyright (c) 2014,2017 The Linux Foundation. All rights reserved.
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify it 7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License version 2 as published by 8 * under the terms of the GNU General Public License version 2 as published by
@@ -17,6 +17,7 @@
17 * this program. If not, see <http://www.gnu.org/licenses/>. 17 * this program. If not, see <http://www.gnu.org/licenses/>.
18 */ 18 */
19 19
20#include <linux/pm_opp.h>
20#include "adreno_gpu.h" 21#include "adreno_gpu.h"
21 22
22#define ANY_ID 0xff 23#define ANY_ID 0xff
@@ -155,21 +156,14 @@ struct msm_gpu *adreno_load_gpu(struct drm_device *dev)
155 156
156 if (gpu) { 157 if (gpu) {
157 int ret; 158 int ret;
158 mutex_lock(&dev->struct_mutex);
159 gpu->funcs->pm_resume(gpu);
160 mutex_unlock(&dev->struct_mutex);
161 159
162 disable_irq(gpu->irq); 160 pm_runtime_get_sync(&pdev->dev);
163 161 ret = msm_gpu_hw_init(gpu);
164 ret = gpu->funcs->hw_init(gpu); 162 pm_runtime_put_sync(&pdev->dev);
165 if (ret) { 163 if (ret) {
166 dev_err(dev->dev, "gpu hw init failed: %d\n", ret); 164 dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
167 gpu->funcs->destroy(gpu); 165 gpu->funcs->destroy(gpu);
168 gpu = NULL; 166 gpu = NULL;
169 } else {
170 enable_irq(gpu->irq);
171 /* give inactive pm a chance to kick in: */
172 msm_gpu_retire(gpu);
173 } 167 }
174 } 168 }
175 169
@@ -220,10 +214,71 @@ static int find_chipid(struct device *dev, u32 *chipid)
220 return 0; 214 return 0;
221} 215}
222 216
217/* Get legacy powerlevels from qcom,gpu-pwrlevels and populate the opp table */
218static int adreno_get_legacy_pwrlevels(struct device *dev)
219{
220 struct device_node *child, *node;
221 int ret;
222
223 node = of_find_compatible_node(dev->of_node, NULL,
224 "qcom,gpu-pwrlevels");
225 if (!node) {
226 dev_err(dev, "Could not find the GPU powerlevels\n");
227 return -ENXIO;
228 }
229
230 for_each_child_of_node(node, child) {
231 unsigned int val;
232
233 ret = of_property_read_u32(child, "qcom,gpu-freq", &val);
234 if (ret)
235 continue;
236
237 /*
238 * Skip the intentionally bogus clock value found at the bottom
239 * of most legacy frequency tables
240 */
241 if (val != 27000000)
242 dev_pm_opp_add(dev, val, 0);
243 }
244
245 return 0;
246}
247
248static int adreno_get_pwrlevels(struct device *dev,
249 struct adreno_platform_config *config)
250{
251 unsigned long freq = ULONG_MAX;
252 struct dev_pm_opp *opp;
253 int ret;
254
255 /* You down with OPP? */
256 if (!of_find_property(dev->of_node, "operating-points-v2", NULL))
257 ret = adreno_get_legacy_pwrlevels(dev);
258 else
259 ret = dev_pm_opp_of_add_table(dev);
260
261 if (ret)
262 return ret;
263
264 /* Find the fastest defined rate */
265 opp = dev_pm_opp_find_freq_floor(dev, &freq);
266 if (!IS_ERR(opp))
267 config->fast_rate = dev_pm_opp_get_freq(opp);
268
269 if (!config->fast_rate) {
270 DRM_DEV_INFO(dev,
271 "Could not find clock rate. Using default\n");
272 /* Pick a suitably safe clock speed for any target */
273 config->fast_rate = 200000000;
274 }
275
276 return 0;
277}
278
223static int adreno_bind(struct device *dev, struct device *master, void *data) 279static int adreno_bind(struct device *dev, struct device *master, void *data)
224{ 280{
225 static struct adreno_platform_config config = {}; 281 static struct adreno_platform_config config = {};
226 struct device_node *child, *node = dev->of_node;
227 u32 val; 282 u32 val;
228 int ret; 283 int ret;
229 284
@@ -238,28 +293,10 @@ static int adreno_bind(struct device *dev, struct device *master, void *data)
238 293
239 /* find clock rates: */ 294 /* find clock rates: */
240 config.fast_rate = 0; 295 config.fast_rate = 0;
241 config.slow_rate = ~0;
242 for_each_child_of_node(node, child) {
243 if (of_device_is_compatible(child, "qcom,gpu-pwrlevels")) {
244 struct device_node *pwrlvl;
245 for_each_child_of_node(child, pwrlvl) {
246 ret = of_property_read_u32(pwrlvl, "qcom,gpu-freq", &val);
247 if (ret) {
248 dev_err(dev, "could not find gpu-freq: %d\n", ret);
249 return ret;
250 }
251 config.fast_rate = max(config.fast_rate, val);
252 config.slow_rate = min(config.slow_rate, val);
253 }
254 }
255 }
256 296
257 if (!config.fast_rate) { 297 ret = adreno_get_pwrlevels(dev, &config);
258 dev_warn(dev, "could not find clk rates\n"); 298 if (ret)
259 /* This is a safe low speed for all devices: */ 299 return ret;
260 config.fast_rate = 200000000;
261 config.slow_rate = 27000000;
262 }
263 300
264 dev->platform_data = &config; 301 dev->platform_data = &config;
265 set_gpu_pdev(dev_get_drvdata(master), to_platform_device(dev)); 302 set_gpu_pdev(dev_get_drvdata(master), to_platform_device(dev));
@@ -296,12 +333,35 @@ static const struct of_device_id dt_match[] = {
296 {} 333 {}
297}; 334};
298 335
336#ifdef CONFIG_PM
337static int adreno_resume(struct device *dev)
338{
339 struct platform_device *pdev = to_platform_device(dev);
340 struct msm_gpu *gpu = platform_get_drvdata(pdev);
341
342 return gpu->funcs->pm_resume(gpu);
343}
344
345static int adreno_suspend(struct device *dev)
346{
347 struct platform_device *pdev = to_platform_device(dev);
348 struct msm_gpu *gpu = platform_get_drvdata(pdev);
349
350 return gpu->funcs->pm_suspend(gpu);
351}
352#endif
353
354static const struct dev_pm_ops adreno_pm_ops = {
355 SET_RUNTIME_PM_OPS(adreno_suspend, adreno_resume, NULL)
356};
357
299static struct platform_driver adreno_driver = { 358static struct platform_driver adreno_driver = {
300 .probe = adreno_probe, 359 .probe = adreno_probe,
301 .remove = adreno_remove, 360 .remove = adreno_remove,
302 .driver = { 361 .driver = {
303 .name = "adreno", 362 .name = "adreno",
304 .of_match_table = dt_match, 363 .of_match_table = dt_match,
364 .pm = &adreno_pm_ops,
305 }, 365 },
306}; 366};
307 367
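Two related changes land in this file: legacy qcom,gpu-pwrlevels tables are converted into OPP entries (skipping the intentionally bogus 27 MHz floor) so dev_pm_opp_find_freq_floor() can pick the fastest rate, and the driver gains runtime-PM ops so the core powers the GPU up and down on demand. A sketch of the OPP query, mirroring the patch with the error handling simplified:

#include <linux/pm_opp.h>

/* Sketch: fastest rate from a populated OPP table, 0 if none defined */
static unsigned long example_fastest_rate(struct device *dev)
{
        unsigned long freq = ULONG_MAX;
        struct dev_pm_opp *opp;

        opp = dev_pm_opp_find_freq_floor(dev, &freq);
        if (IS_ERR(opp))
                return 0;       /* caller falls back to a safe default */

        return dev_pm_opp_get_freq(opp);
}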
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index c9bd1e6225f4..5b63fc649dcc 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -35,6 +35,9 @@ int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
35 case MSM_PARAM_GMEM_SIZE: 35 case MSM_PARAM_GMEM_SIZE:
36 *value = adreno_gpu->gmem; 36 *value = adreno_gpu->gmem;
37 return 0; 37 return 0;
38 case MSM_PARAM_GMEM_BASE:
39 *value = 0x100000;
40 return 0;
38 case MSM_PARAM_CHIP_ID: 41 case MSM_PARAM_CHIP_ID:
39 *value = adreno_gpu->rev.patchid | 42 *value = adreno_gpu->rev.patchid |
40 (adreno_gpu->rev.minor << 8) | 43 (adreno_gpu->rev.minor << 8) |
@@ -68,6 +71,14 @@ int adreno_hw_init(struct msm_gpu *gpu)
68 return ret; 71 return ret;
69 } 72 }
70 73
74 /* reset ringbuffer: */
75 gpu->rb->cur = gpu->rb->start;
76
77 /* reset completed fence seqno: */
78 adreno_gpu->memptrs->fence = gpu->fctx->completed_fence;
79 adreno_gpu->memptrs->rptr = 0;
80 adreno_gpu->memptrs->wptr = 0;
81
71 /* Setup REG_CP_RB_CNTL: */ 82 /* Setup REG_CP_RB_CNTL: */
72 adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_CNTL, 83 adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_CNTL,
73 /* size is log2(quad-words): */ 84 /* size is log2(quad-words): */
@@ -111,29 +122,20 @@ uint32_t adreno_last_fence(struct msm_gpu *gpu)
111 122
112void adreno_recover(struct msm_gpu *gpu) 123void adreno_recover(struct msm_gpu *gpu)
113{ 124{
114 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
115 struct drm_device *dev = gpu->dev; 125 struct drm_device *dev = gpu->dev;
116 int ret; 126 int ret;
117 127
118 gpu->funcs->pm_suspend(gpu); 128 // XXX pm-runtime?? we *need* the device to be off after this
119 129 // so maybe continuing to call ->pm_suspend/resume() is better?
120 /* reset ringbuffer: */
121 gpu->rb->cur = gpu->rb->start;
122
123 /* reset completed fence seqno: */
124 adreno_gpu->memptrs->fence = gpu->fctx->completed_fence;
125 adreno_gpu->memptrs->rptr = 0;
126 adreno_gpu->memptrs->wptr = 0;
127 130
131 gpu->funcs->pm_suspend(gpu);
128 gpu->funcs->pm_resume(gpu); 132 gpu->funcs->pm_resume(gpu);
129 133
130 disable_irq(gpu->irq); 134 ret = msm_gpu_hw_init(gpu);
131 ret = gpu->funcs->hw_init(gpu);
132 if (ret) { 135 if (ret) {
133 dev_err(dev->dev, "gpu hw init failed: %d\n", ret); 136 dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
134 /* hmm, oh well? */ 137 /* hmm, oh well? */
135 } 138 }
136 enable_irq(gpu->irq);
137} 139}
138 140
139void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, 141void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
@@ -259,8 +261,6 @@ void adreno_show(struct msm_gpu *gpu, struct seq_file *m)
259 seq_printf(m, "wptr: %d\n", adreno_gpu->memptrs->wptr); 261 seq_printf(m, "wptr: %d\n", adreno_gpu->memptrs->wptr);
260 seq_printf(m, "rb wptr: %d\n", get_wptr(gpu->rb)); 262 seq_printf(m, "rb wptr: %d\n", get_wptr(gpu->rb));
261 263
262 gpu->funcs->pm_resume(gpu);
263
264 /* dump these out in a form that can be parsed by demsm: */ 264 /* dump these out in a form that can be parsed by demsm: */
265 seq_printf(m, "IO:region %s 00000000 00020000\n", gpu->name); 265 seq_printf(m, "IO:region %s 00000000 00020000\n", gpu->name);
266 for (i = 0; adreno_gpu->registers[i] != ~0; i += 2) { 266 for (i = 0; adreno_gpu->registers[i] != ~0; i += 2) {
@@ -273,8 +273,6 @@ void adreno_show(struct msm_gpu *gpu, struct seq_file *m)
273 seq_printf(m, "IO:R %08x %08x\n", addr<<2, val); 273 seq_printf(m, "IO:R %08x %08x\n", addr<<2, val);
274 } 274 }
275 } 275 }
276
277 gpu->funcs->pm_suspend(gpu);
278} 276}
279#endif 277#endif
280 278
@@ -354,14 +352,13 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
354 adreno_gpu->rev = config->rev; 352 adreno_gpu->rev = config->rev;
355 353
356 gpu->fast_rate = config->fast_rate; 354 gpu->fast_rate = config->fast_rate;
357 gpu->slow_rate = config->slow_rate;
358 gpu->bus_freq = config->bus_freq; 355 gpu->bus_freq = config->bus_freq;
359#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING 356#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
360 gpu->bus_scale_table = config->bus_scale_table; 357 gpu->bus_scale_table = config->bus_scale_table;
361#endif 358#endif
362 359
363 DBG("fast_rate=%u, slow_rate=%u, bus_freq=%u", 360 DBG("fast_rate=%u, slow_rate=27000000, bus_freq=%u",
364 gpu->fast_rate, gpu->slow_rate, gpu->bus_freq); 361 gpu->fast_rate, gpu->bus_freq);
365 362
366 ret = msm_gpu_init(drm, pdev, &adreno_gpu->base, &funcs->base, 363 ret = msm_gpu_init(drm, pdev, &adreno_gpu->base, &funcs->base,
367 adreno_gpu->info->name, "kgsl_3d0_reg_memory", "kgsl_3d0_irq", 364 adreno_gpu->info->name, "kgsl_3d0_reg_memory", "kgsl_3d0_irq",
@@ -369,6 +366,10 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
369 if (ret) 366 if (ret)
370 return ret; 367 return ret;
371 368
369 pm_runtime_set_autosuspend_delay(&pdev->dev, DRM_MSM_INACTIVE_PERIOD);
370 pm_runtime_use_autosuspend(&pdev->dev);
371 pm_runtime_enable(&pdev->dev);
372
372 ret = request_firmware(&adreno_gpu->pm4, adreno_gpu->info->pm4fw, drm->dev); 373 ret = request_firmware(&adreno_gpu->pm4, adreno_gpu->info->pm4fw, drm->dev);
373 if (ret) { 374 if (ret) {
374 dev_err(drm->dev, "failed to load %s PM4 firmware: %d\n", 375 dev_err(drm->dev, "failed to load %s PM4 firmware: %d\n",
@@ -418,18 +419,27 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
418 return 0; 419 return 0;
419} 420}
420 421
421void adreno_gpu_cleanup(struct adreno_gpu *gpu) 422void adreno_gpu_cleanup(struct adreno_gpu *adreno_gpu)
422{ 423{
423 if (gpu->memptrs_bo) { 424 struct msm_gpu *gpu = &adreno_gpu->base;
424 if (gpu->memptrs) 425
425 msm_gem_put_vaddr(gpu->memptrs_bo); 426 if (adreno_gpu->memptrs_bo) {
427 if (adreno_gpu->memptrs)
428 msm_gem_put_vaddr(adreno_gpu->memptrs_bo);
429
430 if (adreno_gpu->memptrs_iova)
431 msm_gem_put_iova(adreno_gpu->memptrs_bo, gpu->id);
432
433 drm_gem_object_unreference_unlocked(adreno_gpu->memptrs_bo);
434 }
435 release_firmware(adreno_gpu->pm4);
436 release_firmware(adreno_gpu->pfp);
426 437
427 if (gpu->memptrs_iova) 438 msm_gpu_cleanup(gpu);
428 msm_gem_put_iova(gpu->memptrs_bo, gpu->base.id);
429 439
430 drm_gem_object_unreference_unlocked(gpu->memptrs_bo); 440 if (gpu->aspace) {
441 gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu,
442 iommu_ports, ARRAY_SIZE(iommu_ports));
443 msm_gem_address_space_put(gpu->aspace);
431 } 444 }
432 release_firmware(gpu->pm4);
433 release_firmware(gpu->pfp);
434 msm_gpu_cleanup(&gpu->base);
435} 445}
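Moving the ringbuffer and memptrs reset out of adreno_recover() and into adreno_hw_init() means every hardware (re)initialisation, including runtime-PM resume, starts from a clean ring rather than only the recovery path. A sketch of what recover reduces to (msm_gpu_hw_init() is introduced elsewhere in this series; error handling abbreviated):

/* Sketch: recover() just power-cycles; hw_init owns the ring reset */
static void example_recover(struct msm_gpu *gpu)
{
        gpu->funcs->pm_suspend(gpu);
        gpu->funcs->pm_resume(gpu);

        if (msm_gpu_hw_init(gpu))
                DRM_ERROR("gpu hw init failed\n");
}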
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index 42e444a67630..fb4831f9f80b 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -123,7 +123,7 @@ struct adreno_gpu {
123/* platform config data (ie. from DT, or pdata) */ 123/* platform config data (ie. from DT, or pdata) */
124struct adreno_platform_config { 124struct adreno_platform_config {
125 struct adreno_rev rev; 125 struct adreno_rev rev;
126 uint32_t fast_rate, slow_rate, bus_freq; 126 uint32_t fast_rate, bus_freq;
127#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING 127#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
128 struct msm_bus_scale_pdata *bus_scale_table; 128 struct msm_bus_scale_pdata *bus_scale_table;
129#endif 129#endif
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index 4f79b109173d..f97a7803a02d 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -1635,7 +1635,7 @@ static int dsi_host_parse_dt(struct msm_dsi_host *msm_host)
1635 } 1635 }
1636 1636
1637 /* Get panel node from the output port's endpoint data */ 1637 /* Get panel node from the output port's endpoint data */
1638 device_node = of_graph_get_remote_port_parent(endpoint); 1638 device_node = of_graph_get_remote_node(np, 1, 0);
1639 if (!device_node) { 1639 if (!device_node) {
1640 dev_dbg(dev, "%s: no valid device\n", __func__); 1640 dev_dbg(dev, "%s: no valid device\n", __func__);
1641 goto err; 1641 goto err;
diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c
index 921270ea6059..a879ffa534b4 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_manager.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c
@@ -171,7 +171,7 @@ dsi_mgr_phy_enable(int id,
171 } 171 }
172 } 172 }
173 } else { 173 } else {
174 msm_dsi_host_reset_phy(mdsi->host); 174 msm_dsi_host_reset_phy(msm_dsi->host);
175 ret = enable_phy(msm_dsi, src_pll_id, &shared_timings[id]); 175 ret = enable_phy(msm_dsi, src_pll_id, &shared_timings[id]);
176 if (ret) 176 if (ret)
177 return ret; 177 return ret;
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_audio.c b/drivers/gpu/drm/msm/hdmi/hdmi_audio.c
index a54d3bb5baad..8177e8511afd 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_audio.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_audio.c
@@ -18,13 +18,6 @@
18#include <linux/hdmi.h> 18#include <linux/hdmi.h>
19#include "hdmi.h" 19#include "hdmi.h"
20 20
21
22/* Supported HDMI Audio channels */
23#define MSM_HDMI_AUDIO_CHANNEL_2 0
24#define MSM_HDMI_AUDIO_CHANNEL_4 1
25#define MSM_HDMI_AUDIO_CHANNEL_6 2
26#define MSM_HDMI_AUDIO_CHANNEL_8 3
27
28/* maps MSM_HDMI_AUDIO_CHANNEL_n consts used by audio driver to # of channels: */ 21/* maps MSM_HDMI_AUDIO_CHANNEL_n consts used by audio driver to # of channels: */
29static int nchannels[] = { 2, 4, 6, 8 }; 22static int nchannels[] = { 2, 4, 6, 8 };
30 23
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
index 1c29618f4ddb..f29194a74a19 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
@@ -114,15 +114,9 @@ static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
114 spin_lock_irqsave(&dev->event_lock, flags); 114 spin_lock_irqsave(&dev->event_lock, flags);
115 event = mdp4_crtc->event; 115 event = mdp4_crtc->event;
116 if (event) { 116 if (event) {
117 /* if regular vblank case (!file) or if cancel-flip from 117 mdp4_crtc->event = NULL;
118 * preclose on file that requested flip, then send the 118 DBG("%s: send event: %p", mdp4_crtc->name, event);
119 * event: 119 drm_crtc_send_vblank_event(crtc, event);
120 */
121 if (!file || (event->base.file_priv == file)) {
122 mdp4_crtc->event = NULL;
123 DBG("%s: send event: %p", mdp4_crtc->name, event);
124 drm_crtc_send_vblank_event(crtc, event);
125 }
126 } 120 }
127 spin_unlock_irqrestore(&dev->event_lock, flags); 121 spin_unlock_irqrestore(&dev->event_lock, flags);
128} 122}
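With the file_priv filtering gone, complete_flip() unconditionally hands any pending event back to the core under event_lock; the DRM core now cancels events on file close itself. Minimal sketch of the resulting pattern (the event slot is driver-private state):

/* Sketch: send a pending pageflip event from the vblank path */
static void example_send_flip_event(struct drm_crtc *crtc,
                                    struct drm_pending_vblank_event **slot)
{
        struct drm_device *dev = crtc->dev;
        struct drm_pending_vblank_event *event;
        unsigned long flags;

        spin_lock_irqsave(&dev->event_lock, flags);
        event = *slot;
        *slot = NULL;
        if (event)
                drm_crtc_send_vblank_event(crtc, event);
        spin_unlock_irqrestore(&dev->event_lock, flags);
}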
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
index a4e1206a66a8..3d26d7774c08 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
@@ -169,7 +169,7 @@ static void mdp4_destroy(struct msm_kms *kms)
169 if (aspace) { 169 if (aspace) {
170 aspace->mmu->funcs->detach(aspace->mmu, 170 aspace->mmu->funcs->detach(aspace->mmu,
171 iommu_ports, ARRAY_SIZE(iommu_ports)); 171 iommu_ports, ARRAY_SIZE(iommu_ports));
172 msm_gem_address_space_destroy(aspace); 172 msm_gem_address_space_put(aspace);
173 } 173 }
174 174
175 if (mdp4_kms->rpm_enabled) 175 if (mdp4_kms->rpm_enabled)
@@ -225,32 +225,6 @@ int mdp4_enable(struct mdp4_kms *mdp4_kms)
225 return 0; 225 return 0;
226} 226}
227 227
228static struct device_node *mdp4_detect_lcdc_panel(struct drm_device *dev)
229{
230 struct device_node *endpoint, *panel_node;
231 struct device_node *np = dev->dev->of_node;
232
233 /*
234 * LVDS/LCDC is the first port described in the list of ports in the
235 * MDP4 DT node.
236 */
237 endpoint = of_graph_get_endpoint_by_regs(np, 0, -1);
238 if (!endpoint) {
239 DBG("no LVDS remote endpoint\n");
240 return NULL;
241 }
242
243 panel_node = of_graph_get_remote_port_parent(endpoint);
244 if (!panel_node) {
245 DBG("no valid panel node in LVDS endpoint\n");
246 of_node_put(endpoint);
247 return NULL;
248 }
249
250 of_node_put(endpoint);
251
252 return panel_node;
253}
254 228
255static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms, 229static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms,
256 int intf_type) 230 int intf_type)
@@ -269,7 +243,7 @@ static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms,
269 * bail out early if there is no panel node (no need to 243 * bail out early if there is no panel node (no need to
270 * initialize LCDC encoder and LVDS connector) 244 * initialize LCDC encoder and LVDS connector)
271 */ 245 */
272 panel_node = mdp4_detect_lcdc_panel(dev); 246 panel_node = of_graph_get_remote_node(dev->dev->of_node, 0, 0);
273 if (!panel_node) 247 if (!panel_node)
274 return 0; 248 return 0;
275 249
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
index ba2d017f6591..c2bdad88447e 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
@@ -70,6 +70,18 @@ const struct mdp5_cfg_hw msm8x74v1_config = {
70 .lm = { 70 .lm = {
71 .count = 5, 71 .count = 5,
72 .base = { 0x03100, 0x03500, 0x03900, 0x03d00, 0x04100 }, 72 .base = { 0x03100, 0x03500, 0x03900, 0x03d00, 0x04100 },
73 .instances = {
74 { .id = 0, .pp = 0, .dspp = 0,
75 .caps = MDP_LM_CAP_DISPLAY, },
76 { .id = 1, .pp = 1, .dspp = 1,
77 .caps = MDP_LM_CAP_DISPLAY, },
78 { .id = 2, .pp = 2, .dspp = 2,
79 .caps = MDP_LM_CAP_DISPLAY, },
80 { .id = 3, .pp = -1, .dspp = -1,
81 .caps = MDP_LM_CAP_WB },
82 { .id = 4, .pp = -1, .dspp = -1,
83 .caps = MDP_LM_CAP_WB },
84 },
73 .nb_stages = 5, 85 .nb_stages = 5,
74 }, 86 },
75 .dspp = { 87 .dspp = {
@@ -134,6 +146,18 @@ const struct mdp5_cfg_hw msm8x74v2_config = {
134 .lm = { 146 .lm = {
135 .count = 5, 147 .count = 5,
136 .base = { 0x03100, 0x03500, 0x03900, 0x03d00, 0x04100 }, 148 .base = { 0x03100, 0x03500, 0x03900, 0x03d00, 0x04100 },
149 .instances = {
150 { .id = 0, .pp = 0, .dspp = 0,
151 .caps = MDP_LM_CAP_DISPLAY, },
152 { .id = 1, .pp = 1, .dspp = 1,
153 .caps = MDP_LM_CAP_DISPLAY, },
154 { .id = 2, .pp = 2, .dspp = 2,
155 .caps = MDP_LM_CAP_DISPLAY, },
156 { .id = 3, .pp = -1, .dspp = -1,
157 .caps = MDP_LM_CAP_WB, },
158 { .id = 4, .pp = -1, .dspp = -1,
159 .caps = MDP_LM_CAP_WB, },
160 },
137 .nb_stages = 5, 161 .nb_stages = 5,
138 .max_width = 2048, 162 .max_width = 2048,
139 .max_height = 0xFFFF, 163 .max_height = 0xFFFF,
@@ -167,6 +191,7 @@ const struct mdp5_cfg_hw apq8084_config = {
167 .mdp = { 191 .mdp = {
168 .count = 1, 192 .count = 1,
169 .caps = MDP_CAP_SMP | 193 .caps = MDP_CAP_SMP |
194 MDP_CAP_SRC_SPLIT |
170 0, 195 0,
171 }, 196 },
172 .smp = { 197 .smp = {
@@ -211,6 +236,22 @@ const struct mdp5_cfg_hw apq8084_config = {
211 .lm = { 236 .lm = {
212 .count = 6, 237 .count = 6,
213 .base = { 0x03900, 0x03d00, 0x04100, 0x04500, 0x04900, 0x04d00 }, 238 .base = { 0x03900, 0x03d00, 0x04100, 0x04500, 0x04900, 0x04d00 },
239 .instances = {
240 { .id = 0, .pp = 0, .dspp = 0,
241 .caps = MDP_LM_CAP_DISPLAY |
242 MDP_LM_CAP_PAIR, },
243 { .id = 1, .pp = 1, .dspp = 1,
244 .caps = MDP_LM_CAP_DISPLAY, },
245 { .id = 2, .pp = 2, .dspp = 2,
246 .caps = MDP_LM_CAP_DISPLAY |
247 MDP_LM_CAP_PAIR, },
248 { .id = 3, .pp = -1, .dspp = -1,
249 .caps = MDP_LM_CAP_WB, },
250 { .id = 4, .pp = -1, .dspp = -1,
251 .caps = MDP_LM_CAP_WB, },
252 { .id = 5, .pp = 3, .dspp = 3,
253 .caps = MDP_LM_CAP_DISPLAY, },
254 },
214 .nb_stages = 5, 255 .nb_stages = 5,
215 .max_width = 2048, 256 .max_width = 2048,
216 .max_height = 0xFFFF, 257 .max_height = 0xFFFF,
@@ -282,6 +323,12 @@ const struct mdp5_cfg_hw msm8x16_config = {
282 .lm = { 323 .lm = {
283 .count = 2, /* LM0 and LM3 */ 324 .count = 2, /* LM0 and LM3 */
284 .base = { 0x44000, 0x47000 }, 325 .base = { 0x44000, 0x47000 },
326 .instances = {
327 { .id = 0, .pp = 0, .dspp = 0,
328 .caps = MDP_LM_CAP_DISPLAY, },
329 { .id = 3, .pp = -1, .dspp = -1,
330 .caps = MDP_LM_CAP_WB },
331 },
285 .nb_stages = 8, 332 .nb_stages = 8,
286 .max_width = 2048, 333 .max_width = 2048,
287 .max_height = 0xFFFF, 334 .max_height = 0xFFFF,
@@ -306,6 +353,7 @@ const struct mdp5_cfg_hw msm8x94_config = {
306 .mdp = { 353 .mdp = {
307 .count = 1, 354 .count = 1,
308 .caps = MDP_CAP_SMP | 355 .caps = MDP_CAP_SMP |
356 MDP_CAP_SRC_SPLIT |
309 0, 357 0,
310 }, 358 },
311 .smp = { 359 .smp = {
@@ -350,6 +398,22 @@ const struct mdp5_cfg_hw msm8x94_config = {
350 .lm = { 398 .lm = {
351 .count = 6, 399 .count = 6,
352 .base = { 0x44000, 0x45000, 0x46000, 0x47000, 0x48000, 0x49000 }, 400 .base = { 0x44000, 0x45000, 0x46000, 0x47000, 0x48000, 0x49000 },
401 .instances = {
402 { .id = 0, .pp = 0, .dspp = 0,
403 .caps = MDP_LM_CAP_DISPLAY |
404 MDP_LM_CAP_PAIR, },
405 { .id = 1, .pp = 1, .dspp = 1,
406 .caps = MDP_LM_CAP_DISPLAY, },
407 { .id = 2, .pp = 2, .dspp = 2,
408 .caps = MDP_LM_CAP_DISPLAY |
409 MDP_LM_CAP_PAIR, },
410 { .id = 3, .pp = -1, .dspp = -1,
411 .caps = MDP_LM_CAP_WB, },
412 { .id = 4, .pp = -1, .dspp = -1,
413 .caps = MDP_LM_CAP_WB, },
414 { .id = 5, .pp = 3, .dspp = 3,
415 .caps = MDP_LM_CAP_DISPLAY, },
416 },
353 .nb_stages = 8, 417 .nb_stages = 8,
354 .max_width = 2048, 418 .max_width = 2048,
355 .max_height = 0xFFFF, 419 .max_height = 0xFFFF,
@@ -385,6 +449,7 @@ const struct mdp5_cfg_hw msm8x96_config = {
385 .count = 1, 449 .count = 1,
386 .caps = MDP_CAP_DSC | 450 .caps = MDP_CAP_DSC |
387 MDP_CAP_CDM | 451 MDP_CAP_CDM |
452 MDP_CAP_SRC_SPLIT |
388 0, 453 0,
389 }, 454 },
390 .ctl = { 455 .ctl = {
@@ -434,6 +499,22 @@ const struct mdp5_cfg_hw msm8x96_config = {
434 .lm = { 499 .lm = {
435 .count = 6, 500 .count = 6,
436 .base = { 0x44000, 0x45000, 0x46000, 0x47000, 0x48000, 0x49000 }, 501 .base = { 0x44000, 0x45000, 0x46000, 0x47000, 0x48000, 0x49000 },
502 .instances = {
503 { .id = 0, .pp = 0, .dspp = 0,
504 .caps = MDP_LM_CAP_DISPLAY |
505 MDP_LM_CAP_PAIR, },
506 { .id = 1, .pp = 1, .dspp = 1,
507 .caps = MDP_LM_CAP_DISPLAY, },
508 { .id = 2, .pp = 2, .dspp = -1,
509 .caps = MDP_LM_CAP_DISPLAY |
510 MDP_LM_CAP_PAIR, },
511 { .id = 3, .pp = -1, .dspp = -1,
512 .caps = MDP_LM_CAP_WB, },
513 { .id = 4, .pp = -1, .dspp = -1,
514 .caps = MDP_LM_CAP_WB, },
515 { .id = 5, .pp = 3, .dspp = -1,
516 .caps = MDP_LM_CAP_DISPLAY, },
517 },
437 .nb_stages = 8, 518 .nb_stages = 8,
438 .max_width = 2560, 519 .max_width = 2560,
439 .max_height = 0xFFFF, 520 .max_height = 0xFFFF,
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h
index b1c7daaede86..75910d0f2f4c 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h
@@ -39,8 +39,16 @@ struct mdp5_sub_block {
39 MDP5_SUB_BLOCK_DEFINITION; 39 MDP5_SUB_BLOCK_DEFINITION;
40}; 40};
41 41
42struct mdp5_lm_instance {
43 int id;
44 int pp;
45 int dspp;
46 uint32_t caps;
47};
48
42struct mdp5_lm_block { 49struct mdp5_lm_block {
43 MDP5_SUB_BLOCK_DEFINITION; 50 MDP5_SUB_BLOCK_DEFINITION;
51 struct mdp5_lm_instance instances[MAX_BASES];
44 uint32_t nb_stages; /* number of stages per blender */ 52 uint32_t nb_stages; /* number of stages per blender */
45 uint32_t max_width; /* Maximum output resolution */ 53 uint32_t max_width; /* Maximum output resolution */
46 uint32_t max_height; 54 uint32_t max_height;
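Each layer mixer now carries an explicit capability record instead of having its role inferred from its index, which is what lets assignment code tell display-capable mixers (some pairable for source split) from writeback-only ones. A hedged sketch of scanning such a table; the loop is illustrative, the struct and caps flags come from the patch, and the count field is assumed to come from MDP5_SUB_BLOCK_DEFINITION:

/* Sketch: first mixer instance that can drive a display */
static const struct mdp5_lm_instance *
example_find_display_lm(const struct mdp5_lm_block *lm)
{
        int i;

        for (i = 0; i < lm->count; i++)
                if (lm->instances[i].caps & MDP_LM_CAP_DISPLAY)
                        return &lm->instances[i];

        return NULL;
}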
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c
index df1c8adec3f3..8dafc7bdba48 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c
@@ -51,7 +51,8 @@ static int pingpong_tearcheck_setup(struct drm_encoder *encoder,
51 struct device *dev = encoder->dev->dev; 51 struct device *dev = encoder->dev->dev;
52 u32 total_lines_x100, vclks_line, cfg; 52 u32 total_lines_x100, vclks_line, cfg;
53 long vsync_clk_speed; 53 long vsync_clk_speed;
54 int pp_id = GET_PING_PONG_ID(mdp5_crtc_get_lm(encoder->crtc)); 54 struct mdp5_hw_mixer *mixer = mdp5_crtc_get_mixer(encoder->crtc);
55 int pp_id = mixer->pp;
55 56
56 if (IS_ERR_OR_NULL(mdp5_kms->vsync_clk)) { 57 if (IS_ERR_OR_NULL(mdp5_kms->vsync_clk)) {
57 dev_err(dev, "vsync_clk is not initialized\n"); 58 dev_err(dev, "vsync_clk is not initialized\n");
@@ -94,7 +95,8 @@ static int pingpong_tearcheck_setup(struct drm_encoder *encoder,
94static int pingpong_tearcheck_enable(struct drm_encoder *encoder) 95static int pingpong_tearcheck_enable(struct drm_encoder *encoder)
95{ 96{
96 struct mdp5_kms *mdp5_kms = get_kms(encoder); 97 struct mdp5_kms *mdp5_kms = get_kms(encoder);
97 int pp_id = GET_PING_PONG_ID(mdp5_crtc_get_lm(encoder->crtc)); 98 struct mdp5_hw_mixer *mixer = mdp5_crtc_get_mixer(encoder->crtc);
99 int pp_id = mixer->pp;
98 int ret; 100 int ret;
99 101
100 ret = clk_set_rate(mdp5_kms->vsync_clk, 102 ret = clk_set_rate(mdp5_kms->vsync_clk,
@@ -119,7 +121,8 @@ static int pingpong_tearcheck_enable(struct drm_encoder *encoder)
119static void pingpong_tearcheck_disable(struct drm_encoder *encoder) 121static void pingpong_tearcheck_disable(struct drm_encoder *encoder)
120{ 122{
121 struct mdp5_kms *mdp5_kms = get_kms(encoder); 123 struct mdp5_kms *mdp5_kms = get_kms(encoder);
122 int pp_id = GET_PING_PONG_ID(mdp5_crtc_get_lm(encoder->crtc)); 124 struct mdp5_hw_mixer *mixer = mdp5_crtc_get_mixer(encoder->crtc);
125 int pp_id = mixer->pp;
123 126
124 mdp5_write(mdp5_kms, REG_MDP5_PP_TEAR_CHECK_EN(pp_id), 0); 127 mdp5_write(mdp5_kms, REG_MDP5_PP_TEAR_CHECK_EN(pp_id), 0);
125 clk_disable_unprepare(mdp5_kms->vsync_clk); 128 clk_disable_unprepare(mdp5_kms->vsync_clk);
@@ -129,8 +132,6 @@ void mdp5_cmd_encoder_mode_set(struct drm_encoder *encoder,
129 struct drm_display_mode *mode, 132 struct drm_display_mode *mode,
130 struct drm_display_mode *adjusted_mode) 133 struct drm_display_mode *adjusted_mode)
131{ 134{
132 struct mdp5_encoder *mdp5_cmd_enc = to_mdp5_encoder(encoder);
133
134 mode = adjusted_mode; 135 mode = adjusted_mode;
135 136
136 DBG("set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x", 137 DBG("set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
@@ -142,23 +143,23 @@ void mdp5_cmd_encoder_mode_set(struct drm_encoder *encoder,
142 mode->vsync_end, mode->vtotal, 143 mode->vsync_end, mode->vtotal,
143 mode->type, mode->flags); 144 mode->type, mode->flags);
144 pingpong_tearcheck_setup(encoder, mode); 145 pingpong_tearcheck_setup(encoder, mode);
145 mdp5_crtc_set_pipeline(encoder->crtc, &mdp5_cmd_enc->intf, 146 mdp5_crtc_set_pipeline(encoder->crtc);
146 mdp5_cmd_enc->ctl);
147} 147}
148 148
149void mdp5_cmd_encoder_disable(struct drm_encoder *encoder) 149void mdp5_cmd_encoder_disable(struct drm_encoder *encoder)
150{ 150{
151 struct mdp5_encoder *mdp5_cmd_enc = to_mdp5_encoder(encoder); 151 struct mdp5_encoder *mdp5_cmd_enc = to_mdp5_encoder(encoder);
152 struct mdp5_ctl *ctl = mdp5_cmd_enc->ctl; 152 struct mdp5_ctl *ctl = mdp5_cmd_enc->ctl;
153 struct mdp5_interface *intf = &mdp5_cmd_enc->intf; 153 struct mdp5_interface *intf = mdp5_cmd_enc->intf;
154 struct mdp5_pipeline *pipeline = mdp5_crtc_get_pipeline(encoder->crtc);
154 155
155 if (WARN_ON(!mdp5_cmd_enc->enabled)) 156 if (WARN_ON(!mdp5_cmd_enc->enabled))
156 return; 157 return;
157 158
158 pingpong_tearcheck_disable(encoder); 159 pingpong_tearcheck_disable(encoder);
159 160
160 mdp5_ctl_set_encoder_state(ctl, false); 161 mdp5_ctl_set_encoder_state(ctl, pipeline, false);
161 mdp5_ctl_commit(ctl, mdp_ctl_flush_mask_encoder(intf)); 162 mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_encoder(intf));
162 163
163 bs_set(mdp5_cmd_enc, 0); 164 bs_set(mdp5_cmd_enc, 0);
164 165
@@ -169,7 +170,8 @@ void mdp5_cmd_encoder_enable(struct drm_encoder *encoder)
169{ 170{
170 struct mdp5_encoder *mdp5_cmd_enc = to_mdp5_encoder(encoder); 171 struct mdp5_encoder *mdp5_cmd_enc = to_mdp5_encoder(encoder);
171 struct mdp5_ctl *ctl = mdp5_cmd_enc->ctl; 172 struct mdp5_ctl *ctl = mdp5_cmd_enc->ctl;
172 struct mdp5_interface *intf = &mdp5_cmd_enc->intf; 173 struct mdp5_interface *intf = mdp5_cmd_enc->intf;
174 struct mdp5_pipeline *pipeline = mdp5_crtc_get_pipeline(encoder->crtc);
173 175
174 if (WARN_ON(mdp5_cmd_enc->enabled)) 176 if (WARN_ON(mdp5_cmd_enc->enabled))
175 return; 177 return;
@@ -178,9 +180,9 @@ void mdp5_cmd_encoder_enable(struct drm_encoder *encoder)
178 if (pingpong_tearcheck_enable(encoder)) 180 if (pingpong_tearcheck_enable(encoder))
179 return; 181 return;
180 182
181 mdp5_ctl_commit(ctl, mdp_ctl_flush_mask_encoder(intf)); 183 mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_encoder(intf));
182 184
183 mdp5_ctl_set_encoder_state(ctl, true); 185 mdp5_ctl_set_encoder_state(ctl, pipeline, true);
184 186
185 mdp5_cmd_enc->enabled = true; 187 mdp5_cmd_enc->enabled = true;
186} 188}
@@ -197,7 +199,7 @@ int mdp5_cmd_encoder_set_split_display(struct drm_encoder *encoder,
197 return -EINVAL; 199 return -EINVAL;
198 200
199 mdp5_kms = get_kms(encoder); 201 mdp5_kms = get_kms(encoder);
200 intf_num = mdp5_cmd_enc->intf.num; 202 intf_num = mdp5_cmd_enc->intf->num;
201 203
202 /* Switch slave encoder's trigger MUX, to use the master's 204 /* Switch slave encoder's trigger MUX, to use the master's
203 * start signal for the slave encoder 205 * start signal for the slave encoder
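The recurring change in this file is that CTL operations take an explicit struct mdp5_pipeline * — the mixer/interface assignment now held in CRTC state — instead of a mixer baked into the encoder. Hedged sketch of the call shape after the change, using only functions that appear in the patch:

/* Sketch: flush an encoder through its CRTC's pipeline state */
static void example_encoder_flush(struct drm_encoder *encoder,
                                  struct mdp5_ctl *ctl,
                                  struct mdp5_interface *intf)
{
        struct mdp5_pipeline *pipeline =
                        mdp5_crtc_get_pipeline(encoder->crtc);

        mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_encoder(intf));
}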
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
index d0c8b38b96ce..9217e0d6e93e 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
@@ -32,13 +32,7 @@ struct mdp5_crtc {
32 int id; 32 int id;
33 bool enabled; 33 bool enabled;
34 34
35 /* layer mixer used for this CRTC (+ its lock): */ 35 spinlock_t lm_lock; /* protect REG_MDP5_LM_* registers */
36#define GET_LM_ID(crtc_id) ((crtc_id == 3) ? 5 : crtc_id)
37 int lm;
38 spinlock_t lm_lock; /* protect REG_MDP5_LM_* registers */
39
40 /* CTL used for this CRTC: */
41 struct mdp5_ctl *ctl;
42 36
43 /* if there is a pending flip, these will be non-null: */ 37 /* if there is a pending flip, these will be non-null: */
44 struct drm_pending_vblank_event *event; 38 struct drm_pending_vblank_event *event;
@@ -61,8 +55,6 @@ struct mdp5_crtc {
61 55
62 struct completion pp_completion; 56 struct completion pp_completion;
63 57
64 bool cmd_mode;
65
66 struct { 58 struct {
67 /* protect REG_MDP5_LM_CURSOR* registers and cursor scanout_bo*/ 59 /* protect REG_MDP5_LM_CURSOR* registers and cursor scanout_bo*/
68 spinlock_t lock; 60 spinlock_t lock;
@@ -97,10 +89,12 @@ static void request_pp_done_pending(struct drm_crtc *crtc)
97 89
98static u32 crtc_flush(struct drm_crtc *crtc, u32 flush_mask) 90static u32 crtc_flush(struct drm_crtc *crtc, u32 flush_mask)
99{ 91{
100 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); 92 struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
93 struct mdp5_ctl *ctl = mdp5_cstate->ctl;
94 struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
101 95
102 DBG("%s: flush=%08x", crtc->name, flush_mask); 96 DBG("%s: flush=%08x", crtc->name, flush_mask);
103 return mdp5_ctl_commit(mdp5_crtc->ctl, flush_mask); 97 return mdp5_ctl_commit(ctl, pipeline, flush_mask);
104} 98}
105 99
106/* 100/*
@@ -110,19 +104,25 @@ static u32 crtc_flush(struct drm_crtc *crtc, u32 flush_mask)
110 */ 104 */
111static u32 crtc_flush_all(struct drm_crtc *crtc) 105static u32 crtc_flush_all(struct drm_crtc *crtc)
112{ 106{
113 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); 107 struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
108 struct mdp5_hw_mixer *mixer, *r_mixer;
114 struct drm_plane *plane; 109 struct drm_plane *plane;
115 uint32_t flush_mask = 0; 110 uint32_t flush_mask = 0;
116 111
117 /* this should not happen: */ 112 /* this should not happen: */
118 if (WARN_ON(!mdp5_crtc->ctl)) 113 if (WARN_ON(!mdp5_cstate->ctl))
119 return 0; 114 return 0;
120 115
121 drm_atomic_crtc_for_each_plane(plane, crtc) { 116 drm_atomic_crtc_for_each_plane(plane, crtc) {
122 flush_mask |= mdp5_plane_get_flush(plane); 117 flush_mask |= mdp5_plane_get_flush(plane);
123 } 118 }
124 119
125 flush_mask |= mdp_ctl_flush_mask_lm(mdp5_crtc->lm); 120 mixer = mdp5_cstate->pipeline.mixer;
121 flush_mask |= mdp_ctl_flush_mask_lm(mixer->lm);
122
123 r_mixer = mdp5_cstate->pipeline.r_mixer;
124 if (r_mixer)
125 flush_mask |= mdp_ctl_flush_mask_lm(r_mixer->lm);
126 126
127 return crtc_flush(crtc, flush_mask); 127 return crtc_flush(crtc, flush_mask);
128} 128}
@@ -130,7 +130,10 @@ static u32 crtc_flush_all(struct drm_crtc *crtc)
130/* if file!=NULL, this is preclose potential cancel-flip path */ 130/* if file!=NULL, this is preclose potential cancel-flip path */
131static void complete_flip(struct drm_crtc *crtc, struct drm_file *file) 131static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
132{ 132{
133 struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
134 struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
133 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); 135 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
136 struct mdp5_ctl *ctl = mdp5_cstate->ctl;
134 struct drm_device *dev = crtc->dev; 137 struct drm_device *dev = crtc->dev;
135 struct drm_pending_vblank_event *event; 138 struct drm_pending_vblank_event *event;
136 unsigned long flags; 139 unsigned long flags;
@@ -138,22 +141,17 @@ static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
138 spin_lock_irqsave(&dev->event_lock, flags); 141 spin_lock_irqsave(&dev->event_lock, flags);
139 event = mdp5_crtc->event; 142 event = mdp5_crtc->event;
140 if (event) { 143 if (event) {
141 /* if regular vblank case (!file) or if cancel-flip from 144 mdp5_crtc->event = NULL;
142 * preclose on file that requested flip, then send the 145 DBG("%s: send event: %p", crtc->name, event);
143 * event: 146 drm_crtc_send_vblank_event(crtc, event);
144 */
145 if (!file || (event->base.file_priv == file)) {
146 mdp5_crtc->event = NULL;
147 DBG("%s: send event: %p", crtc->name, event);
148 drm_crtc_send_vblank_event(crtc, event);
149 }
150 } 147 }
151 spin_unlock_irqrestore(&dev->event_lock, flags); 148 spin_unlock_irqrestore(&dev->event_lock, flags);
152 149
153 if (mdp5_crtc->ctl && !crtc->state->enable) { 150 if (ctl && !crtc->state->enable) {
154 /* set STAGE_UNUSED for all layers */ 151 /* set STAGE_UNUSED for all layers */
155 mdp5_ctl_blend(mdp5_crtc->ctl, NULL, 0, 0); 152 mdp5_ctl_blend(ctl, pipeline, NULL, NULL, 0, 0);
156 mdp5_crtc->ctl = NULL; 153 /* XXX: What to do here? */
154 /* mdp5_crtc->ctl = NULL; */
157 } 155 }
158} 156}
159 157
@@ -193,6 +191,12 @@ static inline u32 mdp5_lm_use_fg_alpha_mask(enum mdp_mixer_stage_id stage)
193} 191}
194 192
195/* 193/*
194 * left/right pipe offsets for the stage array used in blend_setup()
195 */
196#define PIPE_LEFT 0
197#define PIPE_RIGHT 1
198
199/*
196 * blend_setup() - blend all the planes of a CRTC 200 * blend_setup() - blend all the planes of a CRTC
197 * 201 *
198 * If no base layer is available, border will be enabled as the base layer. 202 * If no base layer is available, border will be enabled as the base layer.
@@ -202,18 +206,26 @@ static inline u32 mdp5_lm_use_fg_alpha_mask(enum mdp_mixer_stage_id stage)
202static void blend_setup(struct drm_crtc *crtc) 206static void blend_setup(struct drm_crtc *crtc)
203{ 207{
204 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); 208 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
209 struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
210 struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
205 struct mdp5_kms *mdp5_kms = get_kms(crtc); 211 struct mdp5_kms *mdp5_kms = get_kms(crtc);
206 struct drm_plane *plane; 212 struct drm_plane *plane;
207 const struct mdp5_cfg_hw *hw_cfg; 213 const struct mdp5_cfg_hw *hw_cfg;
208 struct mdp5_plane_state *pstate, *pstates[STAGE_MAX + 1] = {NULL}; 214 struct mdp5_plane_state *pstate, *pstates[STAGE_MAX + 1] = {NULL};
209 const struct mdp_format *format; 215 const struct mdp_format *format;
210 uint32_t lm = mdp5_crtc->lm; 216 struct mdp5_hw_mixer *mixer = pipeline->mixer;
217 uint32_t lm = mixer->lm;
218 struct mdp5_hw_mixer *r_mixer = pipeline->r_mixer;
219 uint32_t r_lm = r_mixer ? r_mixer->lm : 0;
220 struct mdp5_ctl *ctl = mdp5_cstate->ctl;
211 uint32_t blend_op, fg_alpha, bg_alpha, ctl_blend_flags = 0; 221 uint32_t blend_op, fg_alpha, bg_alpha, ctl_blend_flags = 0;
212 unsigned long flags; 222 unsigned long flags;
213 enum mdp5_pipe stage[STAGE_MAX + 1] = { SSPP_NONE }; 223 enum mdp5_pipe stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { SSPP_NONE };
224 enum mdp5_pipe r_stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { SSPP_NONE };
214 int i, plane_cnt = 0; 225 int i, plane_cnt = 0;
215 bool bg_alpha_enabled = false; 226 bool bg_alpha_enabled = false;
216 u32 mixer_op_mode = 0; 227 u32 mixer_op_mode = 0;
228 u32 val;
217#define blender(stage) ((stage) - STAGE0) 229#define blender(stage) ((stage) - STAGE0)
218 230
219 hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg); 231 hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
@@ -221,14 +233,35 @@ static void blend_setup(struct drm_crtc *crtc)
221 spin_lock_irqsave(&mdp5_crtc->lm_lock, flags); 233 spin_lock_irqsave(&mdp5_crtc->lm_lock, flags);
222 234
223 /* ctl could be released already when we are shutting down: */ 235 /* ctl could be released already when we are shutting down: */
224 if (!mdp5_crtc->ctl) 236 /* XXX: Can this happen now? */
237 if (!ctl)
225 goto out; 238 goto out;
226 239
227 /* Collect all plane information */ 240 /* Collect all plane information */
228 drm_atomic_crtc_for_each_plane(plane, crtc) { 241 drm_atomic_crtc_for_each_plane(plane, crtc) {
242 enum mdp5_pipe right_pipe;
243
229 pstate = to_mdp5_plane_state(plane->state); 244 pstate = to_mdp5_plane_state(plane->state);
230 pstates[pstate->stage] = pstate; 245 pstates[pstate->stage] = pstate;
231 stage[pstate->stage] = mdp5_plane_pipe(plane); 246 stage[pstate->stage][PIPE_LEFT] = mdp5_plane_pipe(plane);
247 /*
248 * if we have a right mixer, stage the same pipe as we
249 * have on the left mixer
250 */
251 if (r_mixer)
252 r_stage[pstate->stage][PIPE_LEFT] =
253 mdp5_plane_pipe(plane);
254 /*
 255 * if we have a right pipe (i.e., the plane comprises 2
 256 * hwpipes), then stage the right pipe on the right side of
 257 * both the layer mixers
258 */
259 right_pipe = mdp5_plane_right_pipe(plane);
260 if (right_pipe) {
261 stage[pstate->stage][PIPE_RIGHT] = right_pipe;
262 r_stage[pstate->stage][PIPE_RIGHT] = right_pipe;
263 }
264
232 plane_cnt++; 265 plane_cnt++;
233 } 266 }
234 267
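A compact way to see what the new two-dimensional staging does: every blend stage now carries a (left, right) pipe slot per mixer. The standalone C below models the loop above; the enum values, STAGE_MAX and the two hwpipe names are illustrative stand-ins, not the driver's definitions.

#include <stdio.h>

enum pipe { SSPP_NONE = 0, SSPP_VIG0, SSPP_VIG1 };   /* illustrative */
#define STAGE_MAX      7                             /* illustrative */
#define MAX_PIPE_STAGE 2
#define PIPE_LEFT      0
#define PIPE_RIGHT     1

int main(void)
{
	enum pipe stage[STAGE_MAX + 1][MAX_PIPE_STAGE]   = { { SSPP_NONE } };
	enum pipe r_stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { { SSPP_NONE } };
	int have_r_mixer = 1;          /* source split: two LMs           */
	int s = 2;                     /* plane's zpos-derived stage      */
	enum pipe left = SSPP_VIG0;    /* what mdp5_plane_pipe() returns  */
	enum pipe right = SSPP_VIG1;   /* mdp5_plane_right_pipe() result  */

	stage[s][PIPE_LEFT] = left;        /* left half on the left LM    */
	if (have_r_mixer)                  /* same pipe is staged on the  */
		r_stage[s][PIPE_LEFT] = left;  /* right LM as well        */
	if (right != SSPP_NONE) {          /* 2-hwpipe plane: right half  */
		stage[s][PIPE_RIGHT] = right;  /* lands on both mixers    */
		r_stage[s][PIPE_RIGHT] = right;
	}

	printf("stage[%d]={%d,%d} r_stage[%d]={%d,%d}\n",
	       s, stage[s][0], stage[s][1], s, r_stage[s][0], r_stage[s][1]);
	return 0;
}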
@@ -294,12 +327,27 @@ static void blend_setup(struct drm_crtc *crtc)
294 blender(i)), fg_alpha); 327 blender(i)), fg_alpha);
295 mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_BG_ALPHA(lm, 328 mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_BG_ALPHA(lm,
296 blender(i)), bg_alpha); 329 blender(i)), bg_alpha);
330 if (r_mixer) {
331 mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_OP_MODE(r_lm,
332 blender(i)), blend_op);
333 mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_FG_ALPHA(r_lm,
334 blender(i)), fg_alpha);
335 mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_BG_ALPHA(r_lm,
336 blender(i)), bg_alpha);
337 }
297 } 338 }
298 339
299 mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm), mixer_op_mode); 340 val = mdp5_read(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm));
300 341 mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm),
301 mdp5_ctl_blend(mdp5_crtc->ctl, stage, plane_cnt, ctl_blend_flags); 342 val | mixer_op_mode);
343 if (r_mixer) {
344 val = mdp5_read(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(r_lm));
345 mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(r_lm),
346 val | mixer_op_mode);
347 }
302 348
349 mdp5_ctl_blend(ctl, pipeline, stage, r_stage, plane_cnt,
350 ctl_blend_flags);
303out: 351out:
304 spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags); 352 spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags);
305} 353}
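Note the change from a blind write to a read-modify-write on LM_BLEND_COLOR_OUT: mode_set_nofb() (further down) now owns the SPLIT_LEFT_RIGHT bit in that register, so blend_setup() must OR its mixer_op_mode on top of whatever is already there. A minimal sketch of the pattern, with the register modeled as a plain variable and the bit position assumed:

#include <stdint.h>

#define SPLIT_LEFT_RIGHT  (1u << 31)   /* assumed bit position */

/* preserve bits owned by mode_set (e.g. SPLIT_LEFT_RIGHT) while
 * adding the op-mode bits computed during blending */
static void lm_blend_color_out_update(uint32_t *reg, uint32_t mixer_op_mode)
{
	uint32_t val = *reg;    /* read   */
	val |= mixer_op_mode;   /* modify */
	*reg = val;             /* write  */
}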
@@ -307,7 +355,12 @@ out:
307static void mdp5_crtc_mode_set_nofb(struct drm_crtc *crtc) 355static void mdp5_crtc_mode_set_nofb(struct drm_crtc *crtc)
308{ 356{
309 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); 357 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
358 struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
310 struct mdp5_kms *mdp5_kms = get_kms(crtc); 359 struct mdp5_kms *mdp5_kms = get_kms(crtc);
360 struct mdp5_hw_mixer *mixer = mdp5_cstate->pipeline.mixer;
361 struct mdp5_hw_mixer *r_mixer = mdp5_cstate->pipeline.r_mixer;
362 uint32_t lm = mixer->lm;
363 u32 mixer_width, val;
311 unsigned long flags; 364 unsigned long flags;
312 struct drm_display_mode *mode; 365 struct drm_display_mode *mode;
313 366
@@ -325,16 +378,40 @@ static void mdp5_crtc_mode_set_nofb(struct drm_crtc *crtc)
325 mode->vsync_end, mode->vtotal, 378 mode->vsync_end, mode->vtotal,
326 mode->type, mode->flags); 379 mode->type, mode->flags);
327 380
381 mixer_width = mode->hdisplay;
382 if (r_mixer)
383 mixer_width /= 2;
384
328 spin_lock_irqsave(&mdp5_crtc->lm_lock, flags); 385 spin_lock_irqsave(&mdp5_crtc->lm_lock, flags);
329 mdp5_write(mdp5_kms, REG_MDP5_LM_OUT_SIZE(mdp5_crtc->lm), 386 mdp5_write(mdp5_kms, REG_MDP5_LM_OUT_SIZE(lm),
330 MDP5_LM_OUT_SIZE_WIDTH(mode->hdisplay) | 387 MDP5_LM_OUT_SIZE_WIDTH(mixer_width) |
331 MDP5_LM_OUT_SIZE_HEIGHT(mode->vdisplay)); 388 MDP5_LM_OUT_SIZE_HEIGHT(mode->vdisplay));
389
390 /* Assign mixer to LEFT side in source split mode */
391 val = mdp5_read(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm));
392 val &= ~MDP5_LM_BLEND_COLOR_OUT_SPLIT_LEFT_RIGHT;
393 mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm), val);
394
395 if (r_mixer) {
396 u32 r_lm = r_mixer->lm;
397
398 mdp5_write(mdp5_kms, REG_MDP5_LM_OUT_SIZE(r_lm),
399 MDP5_LM_OUT_SIZE_WIDTH(mixer_width) |
400 MDP5_LM_OUT_SIZE_HEIGHT(mode->vdisplay));
401
402 /* Assign mixer to RIGHT side in source split mode */
403 val = mdp5_read(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(r_lm));
404 val |= MDP5_LM_BLEND_COLOR_OUT_SPLIT_LEFT_RIGHT;
405 mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(r_lm), val);
406 }
407
332 spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags); 408 spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags);
333} 409}
334 410
335static void mdp5_crtc_disable(struct drm_crtc *crtc) 411static void mdp5_crtc_disable(struct drm_crtc *crtc)
336{ 412{
337 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); 413 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
414 struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
338 struct mdp5_kms *mdp5_kms = get_kms(crtc); 415 struct mdp5_kms *mdp5_kms = get_kms(crtc);
339 416
340 DBG("%s", crtc->name); 417 DBG("%s", crtc->name);
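For the LM_OUT_SIZE programming above: with a right mixer present, each LM scans out half the mode horizontally and the full height, and only the right LM gets the SPLIT_LEFT_RIGHT bit. A one-line model, with illustrative numbers (a 3840x2160 mode yields 1920x2160 per mixer):

/* illustrative: 3840 -> 1920 per LM when source split is active */
static unsigned int lm_out_width(unsigned int hdisplay, int have_r_mixer)
{
	return have_r_mixer ? hdisplay / 2 : hdisplay;
}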
@@ -342,7 +419,7 @@ static void mdp5_crtc_disable(struct drm_crtc *crtc)
342 if (WARN_ON(!mdp5_crtc->enabled)) 419 if (WARN_ON(!mdp5_crtc->enabled))
343 return; 420 return;
344 421
345 if (mdp5_crtc->cmd_mode) 422 if (mdp5_cstate->cmd_mode)
346 mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->pp_done); 423 mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->pp_done);
347 424
348 mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->err); 425 mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->err);
@@ -354,6 +431,7 @@ static void mdp5_crtc_disable(struct drm_crtc *crtc)
354static void mdp5_crtc_enable(struct drm_crtc *crtc) 431static void mdp5_crtc_enable(struct drm_crtc *crtc)
355{ 432{
356 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); 433 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
434 struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
357 struct mdp5_kms *mdp5_kms = get_kms(crtc); 435 struct mdp5_kms *mdp5_kms = get_kms(crtc);
358 436
359 DBG("%s", crtc->name); 437 DBG("%s", crtc->name);
@@ -364,12 +442,73 @@ static void mdp5_crtc_enable(struct drm_crtc *crtc)
364 mdp5_enable(mdp5_kms); 442 mdp5_enable(mdp5_kms);
365 mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->err); 443 mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->err);
366 444
367 if (mdp5_crtc->cmd_mode) 445 if (mdp5_cstate->cmd_mode)
368 mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->pp_done); 446 mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->pp_done);
369 447
370 mdp5_crtc->enabled = true; 448 mdp5_crtc->enabled = true;
371} 449}
372 450
451int mdp5_crtc_setup_pipeline(struct drm_crtc *crtc,
452 struct drm_crtc_state *new_crtc_state,
453 bool need_right_mixer)
454{
455 struct mdp5_crtc_state *mdp5_cstate =
456 to_mdp5_crtc_state(new_crtc_state);
457 struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
458 struct mdp5_interface *intf;
459 bool new_mixer = false;
460
461 new_mixer = !pipeline->mixer;
462
463 if ((need_right_mixer && !pipeline->r_mixer) ||
464 (!need_right_mixer && pipeline->r_mixer))
465 new_mixer = true;
466
467 if (new_mixer) {
468 struct mdp5_hw_mixer *old_mixer = pipeline->mixer;
469 struct mdp5_hw_mixer *old_r_mixer = pipeline->r_mixer;
470 u32 caps;
471 int ret;
472
473 caps = MDP_LM_CAP_DISPLAY;
474 if (need_right_mixer)
475 caps |= MDP_LM_CAP_PAIR;
476
477 ret = mdp5_mixer_assign(new_crtc_state->state, crtc, caps,
478 &pipeline->mixer, need_right_mixer ?
479 &pipeline->r_mixer : NULL);
480 if (ret)
481 return ret;
482
483 mdp5_mixer_release(new_crtc_state->state, old_mixer);
484 if (old_r_mixer) {
485 mdp5_mixer_release(new_crtc_state->state, old_r_mixer);
486 if (!need_right_mixer)
487 pipeline->r_mixer = NULL;
488 }
489 }
490
491 /*
 492 * these should already have been set up in the encoder's atomic
493 * check (called by drm_atomic_helper_check_modeset)
494 */
495 intf = pipeline->intf;
496
497 mdp5_cstate->err_irqmask = intf2err(intf->num);
498 mdp5_cstate->vblank_irqmask = intf2vblank(pipeline->mixer, intf);
499
500 if ((intf->type == INTF_DSI) &&
501 (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)) {
502 mdp5_cstate->pp_done_irqmask = lm2ppdone(pipeline->mixer);
503 mdp5_cstate->cmd_mode = true;
504 } else {
505 mdp5_cstate->pp_done_irqmask = 0;
506 mdp5_cstate->cmd_mode = false;
507 }
508
509 return 0;
510}
511
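The two-branch test at the top of mdp5_crtc_setup_pipeline() collapses to a single rule: re-request mixers when nothing is assigned yet, or when the presence of a right mixer must flip. A standalone model of that decision (opaque pointers stand in for the mixer structs):

#include <stdbool.h>
#include <stddef.h>

/* model of the new_mixer decision in mdp5_crtc_setup_pipeline() */
static bool needs_new_mixers(const void *mixer, const void *r_mixer,
			     bool need_right_mixer)
{
	if (!mixer)
		return true;                 /* nothing assigned yet  */
	return need_right_mixer != (r_mixer != NULL);  /* pairing flips */
}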
373struct plane_state { 512struct plane_state {
374 struct drm_plane *plane; 513 struct drm_plane *plane;
375 struct mdp5_plane_state *state; 514 struct mdp5_plane_state *state;
@@ -391,6 +530,29 @@ static bool is_fullscreen(struct drm_crtc_state *cstate,
391 ((pstate->crtc_y + pstate->crtc_h) >= cstate->mode.vdisplay); 530 ((pstate->crtc_y + pstate->crtc_h) >= cstate->mode.vdisplay);
392} 531}
393 532
533enum mdp_mixer_stage_id get_start_stage(struct drm_crtc *crtc,
534 struct drm_crtc_state *new_crtc_state,
535 struct drm_plane_state *bpstate)
536{
537 struct mdp5_crtc_state *mdp5_cstate =
538 to_mdp5_crtc_state(new_crtc_state);
539
540 /*
541 * if we're in source split mode, it's mandatory to have
542 * border out on the base stage
543 */
544 if (mdp5_cstate->pipeline.r_mixer)
545 return STAGE0;
546
547 /* if the bottom-most layer is not fullscreen, we need to use
548 * it for solid-color:
549 */
550 if (!is_fullscreen(new_crtc_state, bpstate))
551 return STAGE0;
552
553 return STAGE_BASE;
554}
555
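get_start_stage() above hands STAGE_BASE to a plane only when there is a single mixer and the bottom-most plane covers the whole mode; in every other case the base stage is reserved for border color and planes start at STAGE0. A compact model (the enum names and values here are illustrative, not the driver's):

#include <stdbool.h>

enum start { START_BASE, START_STAGE0 };   /* illustrative */

static enum start pick_start(bool have_r_mixer, bool bottom_fullscreen)
{
	if (have_r_mixer)        /* source split: border out is mandatory */
		return START_STAGE0;
	if (!bottom_fullscreen)  /* need solid color under the planes     */
		return START_STAGE0;
	return START_BASE;
}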
394static int mdp5_crtc_atomic_check(struct drm_crtc *crtc, 556static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
395 struct drm_crtc_state *state) 557 struct drm_crtc_state *state)
396{ 558{
@@ -400,8 +562,12 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
400 struct plane_state pstates[STAGE_MAX + 1]; 562 struct plane_state pstates[STAGE_MAX + 1];
401 const struct mdp5_cfg_hw *hw_cfg; 563 const struct mdp5_cfg_hw *hw_cfg;
402 const struct drm_plane_state *pstate; 564 const struct drm_plane_state *pstate;
565 const struct drm_display_mode *mode = &state->adjusted_mode;
403 bool cursor_plane = false; 566 bool cursor_plane = false;
404 int cnt = 0, base = 0, i; 567 bool need_right_mixer = false;
568 int cnt = 0, i;
569 int ret;
570 enum mdp_mixer_stage_id start;
405 571
406 DBG("%s: check", crtc->name); 572 DBG("%s: check", crtc->name);
407 573
@@ -409,32 +575,52 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
409 pstates[cnt].plane = plane; 575 pstates[cnt].plane = plane;
410 pstates[cnt].state = to_mdp5_plane_state(pstate); 576 pstates[cnt].state = to_mdp5_plane_state(pstate);
411 577
578 /*
579 * if any plane on this crtc uses 2 hwpipes, then we need
580 * the crtc to have a right hwmixer.
581 */
582 if (pstates[cnt].state->r_hwpipe)
583 need_right_mixer = true;
412 cnt++; 584 cnt++;
413 585
414 if (plane->type == DRM_PLANE_TYPE_CURSOR) 586 if (plane->type == DRM_PLANE_TYPE_CURSOR)
415 cursor_plane = true; 587 cursor_plane = true;
416 } 588 }
417 589
418 /* assign a stage based on sorted zpos property */ 590 /* bail out early if there aren't any planes */
419 sort(pstates, cnt, sizeof(pstates[0]), pstate_cmp, NULL); 591 if (!cnt)
592 return 0;
420 593
421 /* if the bottom-most layer is not fullscreen, we need to use 594 hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
422 * it for solid-color: 595
596 /*
597 * we need a right hwmixer if the mode's width is greater than a single
598 * LM's max width
423 */ 599 */
424 if ((cnt > 0) && !is_fullscreen(state, &pstates[0].state->base)) 600 if (mode->hdisplay > hw_cfg->lm.max_width)
425 base++; 601 need_right_mixer = true;
602
603 ret = mdp5_crtc_setup_pipeline(crtc, state, need_right_mixer);
604 if (ret) {
605 dev_err(dev->dev, "couldn't assign mixers %d\n", ret);
606 return ret;
607 }
608
609 /* assign a stage based on sorted zpos property */
610 sort(pstates, cnt, sizeof(pstates[0]), pstate_cmp, NULL);
426 611
427 /* trigger a warning if cursor isn't the highest zorder */ 612 /* trigger a warning if cursor isn't the highest zorder */
428 WARN_ON(cursor_plane && 613 WARN_ON(cursor_plane &&
429 (pstates[cnt - 1].plane->type != DRM_PLANE_TYPE_CURSOR)); 614 (pstates[cnt - 1].plane->type != DRM_PLANE_TYPE_CURSOR));
430 615
616 start = get_start_stage(crtc, state, &pstates[0].state->base);
617
431 /* verify that there are not too many planes attached to crtc 618 /* verify that there are not too many planes attached to crtc
432 * and that we don't have conflicting mixer stages: 619 * and that we don't have conflicting mixer stages:
433 */ 620 */
434 hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg); 621 if ((cnt + start - 1) >= hw_cfg->lm.nb_stages) {
435 622 dev_err(dev->dev, "too many planes! cnt=%d, start stage=%d\n",
436 if ((cnt + base) >= hw_cfg->lm.nb_stages) { 623 cnt, start);
437 dev_err(dev->dev, "too many planes! cnt=%d, base=%d\n", cnt, base);
438 return -EINVAL; 624 return -EINVAL;
439 } 625 }
440 626
@@ -442,7 +628,7 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
442 if (cursor_plane && (i == (cnt - 1))) 628 if (cursor_plane && (i == (cnt - 1)))
443 pstates[i].state->stage = hw_cfg->lm.nb_stages; 629 pstates[i].state->stage = hw_cfg->lm.nb_stages;
444 else 630 else
445 pstates[i].state->stage = STAGE_BASE + i + base; 631 pstates[i].state->stage = start + i;
446 DBG("%s: assign pipe %s on stage=%d", crtc->name, 632 DBG("%s: assign pipe %s on stage=%d", crtc->name,
447 pstates[i].plane->name, 633 pstates[i].plane->name,
448 pstates[i].state->stage); 634 pstates[i].state->stage);
@@ -461,6 +647,7 @@ static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc,
461 struct drm_crtc_state *old_crtc_state) 647 struct drm_crtc_state *old_crtc_state)
462{ 648{
463 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); 649 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
650 struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
464 struct drm_device *dev = crtc->dev; 651 struct drm_device *dev = crtc->dev;
465 unsigned long flags; 652 unsigned long flags;
466 653
@@ -477,7 +664,8 @@ static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc,
477 * it means we are trying to flush a CRTC whose state is disabled: 664 * it means we are trying to flush a CRTC whose state is disabled:
478 * nothing else needs to be done. 665 * nothing else needs to be done.
479 */ 666 */
 480 if (unlikely(!mdp5_crtc->ctl)) 667 /* XXX: Can this happen now? */
668 if (unlikely(!mdp5_cstate->ctl))
481 return; 669 return;
482 670
483 blend_setup(crtc); 671 blend_setup(crtc);
@@ -488,11 +676,16 @@ static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc,
488 * This is safe because no pp_done will happen before SW trigger 676 * This is safe because no pp_done will happen before SW trigger
489 * in command mode. 677 * in command mode.
490 */ 678 */
491 if (mdp5_crtc->cmd_mode) 679 if (mdp5_cstate->cmd_mode)
492 request_pp_done_pending(crtc); 680 request_pp_done_pending(crtc);
493 681
494 mdp5_crtc->flushed_mask = crtc_flush_all(crtc); 682 mdp5_crtc->flushed_mask = crtc_flush_all(crtc);
495 683
684 /* XXX are we leaking out state here? */
685 mdp5_crtc->vblank.irqmask = mdp5_cstate->vblank_irqmask;
686 mdp5_crtc->err.irqmask = mdp5_cstate->err_irqmask;
687 mdp5_crtc->pp_done.irqmask = mdp5_cstate->pp_done_irqmask;
688
496 request_pending(crtc, PENDING_FLIP); 689 request_pending(crtc, PENDING_FLIP);
497} 690}
498 691
@@ -527,11 +720,14 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
527 uint32_t width, uint32_t height) 720 uint32_t width, uint32_t height)
528{ 721{
529 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); 722 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
723 struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
724 struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
530 struct drm_device *dev = crtc->dev; 725 struct drm_device *dev = crtc->dev;
531 struct mdp5_kms *mdp5_kms = get_kms(crtc); 726 struct mdp5_kms *mdp5_kms = get_kms(crtc);
532 struct drm_gem_object *cursor_bo, *old_bo = NULL; 727 struct drm_gem_object *cursor_bo, *old_bo = NULL;
533 uint32_t blendcfg, stride; 728 uint32_t blendcfg, stride;
534 uint64_t cursor_addr; 729 uint64_t cursor_addr;
730 struct mdp5_ctl *ctl;
535 int ret, lm; 731 int ret, lm;
536 enum mdp5_cursor_alpha cur_alpha = CURSOR_ALPHA_PER_PIXEL; 732 enum mdp5_cursor_alpha cur_alpha = CURSOR_ALPHA_PER_PIXEL;
537 uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0); 733 uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0);
@@ -544,7 +740,12 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
544 return -EINVAL; 740 return -EINVAL;
545 } 741 }
546 742
547 if (NULL == mdp5_crtc->ctl) 743 ctl = mdp5_cstate->ctl;
744 if (!ctl)
745 return -EINVAL;
746
 747 /* don't support LM cursors when we have source split enabled */
748 if (mdp5_cstate->pipeline.r_mixer)
548 return -EINVAL; 749 return -EINVAL;
549 750
550 if (!handle) { 751 if (!handle) {
@@ -561,7 +762,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
561 if (ret) 762 if (ret)
562 return -EINVAL; 763 return -EINVAL;
563 764
564 lm = mdp5_crtc->lm; 765 lm = mdp5_cstate->pipeline.mixer->lm;
565 stride = width * drm_format_plane_cpp(DRM_FORMAT_ARGB8888, 0); 766 stride = width * drm_format_plane_cpp(DRM_FORMAT_ARGB8888, 0);
566 767
567 spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags); 768 spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
@@ -591,7 +792,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
591 spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags); 792 spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);
592 793
593set_cursor: 794set_cursor:
594 ret = mdp5_ctl_set_cursor(mdp5_crtc->ctl, 0, cursor_enable); 795 ret = mdp5_ctl_set_cursor(ctl, pipeline, 0, cursor_enable);
595 if (ret) { 796 if (ret) {
596 dev_err(dev->dev, "failed to %sable cursor: %d\n", 797 dev_err(dev->dev, "failed to %sable cursor: %d\n",
597 cursor_enable ? "en" : "dis", ret); 798 cursor_enable ? "en" : "dis", ret);
@@ -613,11 +814,17 @@ static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
613{ 814{
614 struct mdp5_kms *mdp5_kms = get_kms(crtc); 815 struct mdp5_kms *mdp5_kms = get_kms(crtc);
615 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); 816 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
817 struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
818 uint32_t lm = mdp5_cstate->pipeline.mixer->lm;
616 uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0); 819 uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0);
617 uint32_t roi_w; 820 uint32_t roi_w;
618 uint32_t roi_h; 821 uint32_t roi_h;
619 unsigned long flags; 822 unsigned long flags;
620 823
 824 /* don't support LM cursors when we have source split enabled */
825 if (mdp5_cstate->pipeline.r_mixer)
826 return -EINVAL;
827
621 /* In case the CRTC is disabled, just drop the cursor update */ 828 /* In case the CRTC is disabled, just drop the cursor update */
622 if (unlikely(!crtc->state->enable)) 829 if (unlikely(!crtc->state->enable))
623 return 0; 830 return 0;
@@ -628,10 +835,10 @@ static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
628 get_roi(crtc, &roi_w, &roi_h); 835 get_roi(crtc, &roi_w, &roi_h);
629 836
630 spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags); 837 spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
631 mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(mdp5_crtc->lm), 838 mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(lm),
632 MDP5_LM_CURSOR_SIZE_ROI_H(roi_h) | 839 MDP5_LM_CURSOR_SIZE_ROI_H(roi_h) |
633 MDP5_LM_CURSOR_SIZE_ROI_W(roi_w)); 840 MDP5_LM_CURSOR_SIZE_ROI_W(roi_w));
634 mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_START_XY(mdp5_crtc->lm), 841 mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_START_XY(lm),
635 MDP5_LM_CURSOR_START_XY_Y_START(y) | 842 MDP5_LM_CURSOR_START_XY_Y_START(y) |
636 MDP5_LM_CURSOR_START_XY_X_START(x)); 843 MDP5_LM_CURSOR_START_XY_X_START(x));
637 spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags); 844 spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);
@@ -641,16 +848,80 @@ static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
641 return 0; 848 return 0;
642} 849}
643 850
851static void
852mdp5_crtc_atomic_print_state(struct drm_printer *p,
853 const struct drm_crtc_state *state)
854{
855 struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(state);
856 struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
857 struct mdp5_kms *mdp5_kms = get_kms(state->crtc);
858
859 if (WARN_ON(!pipeline))
860 return;
861
862 drm_printf(p, "\thwmixer=%s\n", pipeline->mixer ?
863 pipeline->mixer->name : "(null)");
864
865 if (mdp5_kms->caps & MDP_CAP_SRC_SPLIT)
866 drm_printf(p, "\tright hwmixer=%s\n", pipeline->r_mixer ?
867 pipeline->r_mixer->name : "(null)");
868}
869
870static void mdp5_crtc_reset(struct drm_crtc *crtc)
871{
872 struct mdp5_crtc_state *mdp5_cstate;
873
874 if (crtc->state) {
875 __drm_atomic_helper_crtc_destroy_state(crtc->state);
876 kfree(to_mdp5_crtc_state(crtc->state));
877 }
878
879 mdp5_cstate = kzalloc(sizeof(*mdp5_cstate), GFP_KERNEL);
880
881 if (mdp5_cstate) {
882 mdp5_cstate->base.crtc = crtc;
883 crtc->state = &mdp5_cstate->base;
884 }
885}
886
887static struct drm_crtc_state *
888mdp5_crtc_duplicate_state(struct drm_crtc *crtc)
889{
890 struct mdp5_crtc_state *mdp5_cstate;
891
892 if (WARN_ON(!crtc->state))
893 return NULL;
894
895 mdp5_cstate = kmemdup(to_mdp5_crtc_state(crtc->state),
896 sizeof(*mdp5_cstate), GFP_KERNEL);
897 if (!mdp5_cstate)
898 return NULL;
899
900 __drm_atomic_helper_crtc_duplicate_state(crtc, &mdp5_cstate->base);
901
902 return &mdp5_cstate->base;
903}
904
905static void mdp5_crtc_destroy_state(struct drm_crtc *crtc, struct drm_crtc_state *state)
906{
907 struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(state);
908
909 __drm_atomic_helper_crtc_destroy_state(state);
910
911 kfree(mdp5_cstate);
912}
913
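The reset/duplicate/destroy trio above is the standard DRM recipe for subclassing drm_crtc_state: allocate the wrapper, let the __drm_atomic_helper_crtc_*() helpers manage the embedded base, and free the wrapper yourself. The wrapper's definition is not part of this diff; its shape, as implied by the accesses in this file, is roughly:

/* inferred from usage in this diff -- not the authoritative definition */
struct mdp5_crtc_state {
	struct drm_crtc_state base;   /* embedded; to_mdp5_crtc_state()
				       * downcasts from the base pointer */
	struct mdp5_ctl *ctl;
	struct mdp5_pipeline pipeline;
	u32 vblank_irqmask;
	u32 err_irqmask;
	u32 pp_done_irqmask;
	bool cmd_mode;
};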
644static const struct drm_crtc_funcs mdp5_crtc_funcs = { 914static const struct drm_crtc_funcs mdp5_crtc_funcs = {
645 .set_config = drm_atomic_helper_set_config, 915 .set_config = drm_atomic_helper_set_config,
646 .destroy = mdp5_crtc_destroy, 916 .destroy = mdp5_crtc_destroy,
647 .page_flip = drm_atomic_helper_page_flip, 917 .page_flip = drm_atomic_helper_page_flip,
648 .set_property = drm_atomic_helper_crtc_set_property, 918 .set_property = drm_atomic_helper_crtc_set_property,
649 .reset = drm_atomic_helper_crtc_reset, 919 .reset = mdp5_crtc_reset,
650 .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state, 920 .atomic_duplicate_state = mdp5_crtc_duplicate_state,
651 .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, 921 .atomic_destroy_state = mdp5_crtc_destroy_state,
652 .cursor_set = mdp5_crtc_cursor_set, 922 .cursor_set = mdp5_crtc_cursor_set,
653 .cursor_move = mdp5_crtc_cursor_move, 923 .cursor_move = mdp5_crtc_cursor_move,
924 .atomic_print_state = mdp5_crtc_atomic_print_state,
654}; 925};
655 926
656static const struct drm_crtc_funcs mdp5_crtc_no_lm_cursor_funcs = { 927static const struct drm_crtc_funcs mdp5_crtc_no_lm_cursor_funcs = {
@@ -658,9 +929,10 @@ static const struct drm_crtc_funcs mdp5_crtc_no_lm_cursor_funcs = {
658 .destroy = mdp5_crtc_destroy, 929 .destroy = mdp5_crtc_destroy,
659 .page_flip = drm_atomic_helper_page_flip, 930 .page_flip = drm_atomic_helper_page_flip,
660 .set_property = drm_atomic_helper_crtc_set_property, 931 .set_property = drm_atomic_helper_crtc_set_property,
661 .reset = drm_atomic_helper_crtc_reset, 932 .reset = mdp5_crtc_reset,
662 .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state, 933 .atomic_duplicate_state = mdp5_crtc_duplicate_state,
663 .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, 934 .atomic_destroy_state = mdp5_crtc_destroy_state,
935 .atomic_print_state = mdp5_crtc_atomic_print_state,
664}; 936};
665 937
666static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = { 938static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = {
@@ -710,22 +982,26 @@ static void mdp5_crtc_wait_for_pp_done(struct drm_crtc *crtc)
710{ 982{
711 struct drm_device *dev = crtc->dev; 983 struct drm_device *dev = crtc->dev;
712 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); 984 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
985 struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
713 int ret; 986 int ret;
714 987
715 ret = wait_for_completion_timeout(&mdp5_crtc->pp_completion, 988 ret = wait_for_completion_timeout(&mdp5_crtc->pp_completion,
716 msecs_to_jiffies(50)); 989 msecs_to_jiffies(50));
717 if (ret == 0) 990 if (ret == 0)
718 dev_warn(dev->dev, "pp done time out, lm=%d\n", mdp5_crtc->lm); 991 dev_warn(dev->dev, "pp done time out, lm=%d\n",
992 mdp5_cstate->pipeline.mixer->lm);
719} 993}
720 994
721static void mdp5_crtc_wait_for_flush_done(struct drm_crtc *crtc) 995static void mdp5_crtc_wait_for_flush_done(struct drm_crtc *crtc)
722{ 996{
723 struct drm_device *dev = crtc->dev; 997 struct drm_device *dev = crtc->dev;
724 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); 998 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
999 struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
1000 struct mdp5_ctl *ctl = mdp5_cstate->ctl;
725 int ret; 1001 int ret;
726 1002
727 /* Should not call this function if crtc is disabled. */ 1003 /* Should not call this function if crtc is disabled. */
728 if (!mdp5_crtc->ctl) 1004 if (!ctl)
729 return; 1005 return;
730 1006
731 ret = drm_crtc_vblank_get(crtc); 1007 ret = drm_crtc_vblank_get(crtc);
@@ -733,7 +1009,7 @@ static void mdp5_crtc_wait_for_flush_done(struct drm_crtc *crtc)
733 return; 1009 return;
734 1010
735 ret = wait_event_timeout(dev->vblank[drm_crtc_index(crtc)].queue, 1011 ret = wait_event_timeout(dev->vblank[drm_crtc_index(crtc)].queue,
736 ((mdp5_ctl_get_commit_status(mdp5_crtc->ctl) & 1012 ((mdp5_ctl_get_commit_status(ctl) &
737 mdp5_crtc->flushed_mask) == 0), 1013 mdp5_crtc->flushed_mask) == 0),
738 msecs_to_jiffies(50)); 1014 msecs_to_jiffies(50));
739 if (ret <= 0) 1015 if (ret <= 0)
@@ -750,52 +1026,54 @@ uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc)
750 return mdp5_crtc->vblank.irqmask; 1026 return mdp5_crtc->vblank.irqmask;
751} 1027}
752 1028
753void mdp5_crtc_set_pipeline(struct drm_crtc *crtc, 1029void mdp5_crtc_set_pipeline(struct drm_crtc *crtc)
754 struct mdp5_interface *intf, struct mdp5_ctl *ctl)
755{ 1030{
756 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); 1031 struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
757 struct mdp5_kms *mdp5_kms = get_kms(crtc); 1032 struct mdp5_kms *mdp5_kms = get_kms(crtc);
758 int lm = mdp5_crtc_get_lm(crtc);
759
760 /* now that we know what irq's we want: */
761 mdp5_crtc->err.irqmask = intf2err(intf->num);
762 mdp5_crtc->vblank.irqmask = intf2vblank(lm, intf);
763
764 if ((intf->type == INTF_DSI) &&
765 (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)) {
766 mdp5_crtc->pp_done.irqmask = lm2ppdone(lm);
767 mdp5_crtc->pp_done.irq = mdp5_crtc_pp_done_irq;
768 mdp5_crtc->cmd_mode = true;
769 } else {
770 mdp5_crtc->pp_done.irqmask = 0;
771 mdp5_crtc->pp_done.irq = NULL;
772 mdp5_crtc->cmd_mode = false;
773 }
774 1033
 1034 /* should this be done elsewhere? */
775 mdp_irq_update(&mdp5_kms->base); 1035 mdp_irq_update(&mdp5_kms->base);
776 1036
777 mdp5_crtc->ctl = ctl; 1037 mdp5_ctl_set_pipeline(mdp5_cstate->ctl, &mdp5_cstate->pipeline);
778 mdp5_ctl_set_pipeline(ctl, intf, lm);
779} 1038}
780 1039
781struct mdp5_ctl *mdp5_crtc_get_ctl(struct drm_crtc *crtc) 1040struct mdp5_ctl *mdp5_crtc_get_ctl(struct drm_crtc *crtc)
782{ 1041{
783 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); 1042 struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
784 1043
785 return mdp5_crtc->ctl; 1044 return mdp5_cstate->ctl;
786} 1045}
787 1046
788int mdp5_crtc_get_lm(struct drm_crtc *crtc) 1047struct mdp5_hw_mixer *mdp5_crtc_get_mixer(struct drm_crtc *crtc)
789{ 1048{
790 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); 1049 struct mdp5_crtc_state *mdp5_cstate;
791 return WARN_ON(!crtc) ? -EINVAL : mdp5_crtc->lm; 1050
1051 if (WARN_ON(!crtc))
1052 return ERR_PTR(-EINVAL);
1053
1054 mdp5_cstate = to_mdp5_crtc_state(crtc->state);
1055
1056 return WARN_ON(!mdp5_cstate->pipeline.mixer) ?
1057 ERR_PTR(-EINVAL) : mdp5_cstate->pipeline.mixer;
1058}
1059
1060struct mdp5_pipeline *mdp5_crtc_get_pipeline(struct drm_crtc *crtc)
1061{
1062 struct mdp5_crtc_state *mdp5_cstate;
1063
1064 if (WARN_ON(!crtc))
1065 return ERR_PTR(-EINVAL);
1066
1067 mdp5_cstate = to_mdp5_crtc_state(crtc->state);
1068
1069 return &mdp5_cstate->pipeline;
792} 1070}
793 1071
794void mdp5_crtc_wait_for_commit_done(struct drm_crtc *crtc) 1072void mdp5_crtc_wait_for_commit_done(struct drm_crtc *crtc)
795{ 1073{
796 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); 1074 struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
797 1075
798 if (mdp5_crtc->cmd_mode) 1076 if (mdp5_cstate->cmd_mode)
799 mdp5_crtc_wait_for_pp_done(crtc); 1077 mdp5_crtc_wait_for_pp_done(crtc);
800 else 1078 else
801 mdp5_crtc_wait_for_flush_done(crtc); 1079 mdp5_crtc_wait_for_flush_done(crtc);
@@ -816,7 +1094,6 @@ struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
816 crtc = &mdp5_crtc->base; 1094 crtc = &mdp5_crtc->base;
817 1095
818 mdp5_crtc->id = id; 1096 mdp5_crtc->id = id;
819 mdp5_crtc->lm = GET_LM_ID(id);
820 1097
821 spin_lock_init(&mdp5_crtc->lm_lock); 1098 spin_lock_init(&mdp5_crtc->lm_lock);
822 spin_lock_init(&mdp5_crtc->cursor.lock); 1099 spin_lock_init(&mdp5_crtc->cursor.lock);
@@ -824,6 +1101,7 @@ struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
824 1101
825 mdp5_crtc->vblank.irq = mdp5_crtc_vblank_irq; 1102 mdp5_crtc->vblank.irq = mdp5_crtc_vblank_irq;
826 mdp5_crtc->err.irq = mdp5_crtc_err_irq; 1103 mdp5_crtc->err.irq = mdp5_crtc_err_irq;
1104 mdp5_crtc->pp_done.irq = mdp5_crtc_pp_done_irq;
827 1105
828 if (cursor_plane) 1106 if (cursor_plane)
829 drm_crtc_init_with_planes(dev, crtc, plane, cursor_plane, 1107 drm_crtc_init_with_planes(dev, crtc, plane, cursor_plane,
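Before moving into mdp5_ctl.c, it helps to pin down struct mdp5_pipeline, which every reworked function now takes. Its definition sits outside this diff; the fields implied by the call sites are:

/* inferred from usage -- defined elsewhere in the series */
struct mdp5_pipeline {
	struct mdp5_interface *intf;    /* set by encoder atomic_check  */
	struct mdp5_hw_mixer *mixer;    /* left (or only) layer mixer   */
	struct mdp5_hw_mixer *r_mixer;  /* right LM; NULL unless source
					 * split is in use              */
};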
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c
index 8b93f7e13200..439e0a300e25 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c
@@ -32,24 +32,16 @@
32#define CTL_STAT_BUSY 0x1 32#define CTL_STAT_BUSY 0x1
33#define CTL_STAT_BOOKED 0x2 33#define CTL_STAT_BOOKED 0x2
34 34
35struct op_mode {
36 struct mdp5_interface intf;
37
38 bool encoder_enabled;
39 uint32_t start_mask;
40};
41
42struct mdp5_ctl { 35struct mdp5_ctl {
43 struct mdp5_ctl_manager *ctlm; 36 struct mdp5_ctl_manager *ctlm;
44 37
45 u32 id; 38 u32 id;
46 int lm;
47 39
48 /* CTL status bitmask */ 40 /* CTL status bitmask */
49 u32 status; 41 u32 status;
50 42
51 /* Operation Mode Configuration for the Pipeline */ 43 bool encoder_enabled;
52 struct op_mode pipeline; 44 uint32_t start_mask;
53 45
54 /* REG_MDP5_CTL_*(<id>) registers access info + lock: */ 46 /* REG_MDP5_CTL_*(<id>) registers access info + lock: */
55 spinlock_t hw_lock; 47 spinlock_t hw_lock;
@@ -146,9 +138,10 @@ static void set_display_intf(struct mdp5_kms *mdp5_kms,
146 spin_unlock_irqrestore(&mdp5_kms->resource_lock, flags); 138 spin_unlock_irqrestore(&mdp5_kms->resource_lock, flags);
147} 139}
148 140
149static void set_ctl_op(struct mdp5_ctl *ctl, struct mdp5_interface *intf) 141static void set_ctl_op(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline)
150{ 142{
151 unsigned long flags; 143 unsigned long flags;
144 struct mdp5_interface *intf = pipeline->intf;
152 u32 ctl_op = 0; 145 u32 ctl_op = 0;
153 146
154 if (!mdp5_cfg_intf_is_virtual(intf->type)) 147 if (!mdp5_cfg_intf_is_virtual(intf->type))
@@ -169,52 +162,50 @@ static void set_ctl_op(struct mdp5_ctl *ctl, struct mdp5_interface *intf)
169 break; 162 break;
170 } 163 }
171 164
165 if (pipeline->r_mixer)
166 ctl_op |= MDP5_CTL_OP_PACK_3D_ENABLE |
167 MDP5_CTL_OP_PACK_3D(1);
168
172 spin_lock_irqsave(&ctl->hw_lock, flags); 169 spin_lock_irqsave(&ctl->hw_lock, flags);
173 ctl_write(ctl, REG_MDP5_CTL_OP(ctl->id), ctl_op); 170 ctl_write(ctl, REG_MDP5_CTL_OP(ctl->id), ctl_op);
174 spin_unlock_irqrestore(&ctl->hw_lock, flags); 171 spin_unlock_irqrestore(&ctl->hw_lock, flags);
175} 172}
176 173
177int mdp5_ctl_set_pipeline(struct mdp5_ctl *ctl, 174int mdp5_ctl_set_pipeline(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline)
178 struct mdp5_interface *intf, int lm)
179{ 175{
180 struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm; 176 struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
181 struct mdp5_kms *mdp5_kms = get_kms(ctl_mgr); 177 struct mdp5_kms *mdp5_kms = get_kms(ctl_mgr);
178 struct mdp5_interface *intf = pipeline->intf;
179 struct mdp5_hw_mixer *mixer = pipeline->mixer;
180 struct mdp5_hw_mixer *r_mixer = pipeline->r_mixer;
182 181
183 if (unlikely(WARN_ON(intf->num != ctl->pipeline.intf.num))) { 182 ctl->start_mask = mdp_ctl_flush_mask_lm(mixer->lm) |
184 dev_err(mdp5_kms->dev->dev, 183 mdp_ctl_flush_mask_encoder(intf);
185 "CTL %d is allocated by INTF %d, but used by INTF %d\n", 184 if (r_mixer)
186 ctl->id, ctl->pipeline.intf.num, intf->num); 185 ctl->start_mask |= mdp_ctl_flush_mask_lm(r_mixer->lm);
187 return -EINVAL;
188 }
189
190 ctl->lm = lm;
191
192 memcpy(&ctl->pipeline.intf, intf, sizeof(*intf));
193
194 ctl->pipeline.start_mask = mdp_ctl_flush_mask_lm(ctl->lm) |
195 mdp_ctl_flush_mask_encoder(intf);
196 186
197 /* Virtual interfaces need not set a display intf (e.g.: Writeback) */ 187 /* Virtual interfaces need not set a display intf (e.g.: Writeback) */
198 if (!mdp5_cfg_intf_is_virtual(intf->type)) 188 if (!mdp5_cfg_intf_is_virtual(intf->type))
199 set_display_intf(mdp5_kms, intf); 189 set_display_intf(mdp5_kms, intf);
200 190
201 set_ctl_op(ctl, intf); 191 set_ctl_op(ctl, pipeline);
202 192
203 return 0; 193 return 0;
204} 194}
205 195
206static bool start_signal_needed(struct mdp5_ctl *ctl) 196static bool start_signal_needed(struct mdp5_ctl *ctl,
197 struct mdp5_pipeline *pipeline)
207{ 198{
208 struct op_mode *pipeline = &ctl->pipeline; 199 struct mdp5_interface *intf = pipeline->intf;
209 200
210 if (!pipeline->encoder_enabled || pipeline->start_mask != 0) 201 if (!ctl->encoder_enabled || ctl->start_mask != 0)
211 return false; 202 return false;
212 203
213 switch (pipeline->intf.type) { 204 switch (intf->type) {
214 case INTF_WB: 205 case INTF_WB:
215 return true; 206 return true;
216 case INTF_DSI: 207 case INTF_DSI:
217 return pipeline->intf.mode == MDP5_INTF_DSI_MODE_COMMAND; 208 return intf->mode == MDP5_INTF_DSI_MODE_COMMAND;
218 default: 209 default:
219 return false; 210 return false;
220 } 211 }
@@ -236,19 +227,23 @@ static void send_start_signal(struct mdp5_ctl *ctl)
236 spin_unlock_irqrestore(&ctl->hw_lock, flags); 227 spin_unlock_irqrestore(&ctl->hw_lock, flags);
237} 228}
238 229
239static void refill_start_mask(struct mdp5_ctl *ctl) 230static void refill_start_mask(struct mdp5_ctl *ctl,
231 struct mdp5_pipeline *pipeline)
240{ 232{
241 struct op_mode *pipeline = &ctl->pipeline; 233 struct mdp5_interface *intf = pipeline->intf;
242 struct mdp5_interface *intf = &ctl->pipeline.intf; 234 struct mdp5_hw_mixer *mixer = pipeline->mixer;
235 struct mdp5_hw_mixer *r_mixer = pipeline->r_mixer;
243 236
244 pipeline->start_mask = mdp_ctl_flush_mask_lm(ctl->lm); 237 ctl->start_mask = mdp_ctl_flush_mask_lm(mixer->lm);
238 if (r_mixer)
239 ctl->start_mask |= mdp_ctl_flush_mask_lm(r_mixer->lm);
245 240
246 /* 241 /*
247 * Writeback encoder needs to program & flush 242 * Writeback encoder needs to program & flush
248 * address registers for each page flip.. 243 * address registers for each page flip..
249 */ 244 */
250 if (intf->type == INTF_WB) 245 if (intf->type == INTF_WB)
251 pipeline->start_mask |= mdp_ctl_flush_mask_encoder(intf); 246 ctl->start_mask |= mdp_ctl_flush_mask_encoder(intf);
252} 247}
253 248
254/** 249/**
@@ -259,17 +254,21 @@ static void refill_start_mask(struct mdp5_ctl *ctl)
259 * Note: 254 * Note:
260 * This encoder state is needed to trigger START signal (data path kickoff). 255 * This encoder state is needed to trigger START signal (data path kickoff).
261 */ 256 */
262int mdp5_ctl_set_encoder_state(struct mdp5_ctl *ctl, bool enabled) 257int mdp5_ctl_set_encoder_state(struct mdp5_ctl *ctl,
258 struct mdp5_pipeline *pipeline,
259 bool enabled)
263{ 260{
261 struct mdp5_interface *intf = pipeline->intf;
262
264 if (WARN_ON(!ctl)) 263 if (WARN_ON(!ctl))
265 return -EINVAL; 264 return -EINVAL;
266 265
267 ctl->pipeline.encoder_enabled = enabled; 266 ctl->encoder_enabled = enabled;
268 DBG("intf_%d: %s", ctl->pipeline.intf.num, enabled ? "on" : "off"); 267 DBG("intf_%d: %s", intf->num, enabled ? "on" : "off");
269 268
270 if (start_signal_needed(ctl)) { 269 if (start_signal_needed(ctl, pipeline)) {
271 send_start_signal(ctl); 270 send_start_signal(ctl);
272 refill_start_mask(ctl); 271 refill_start_mask(ctl, pipeline);
273 } 272 }
274 273
275 return 0; 274 return 0;
@@ -280,29 +279,35 @@ int mdp5_ctl_set_encoder_state(struct mdp5_ctl *ctl, bool enabled)
280 * CTL registers need to be flushed after calling this function 279 * CTL registers need to be flushed after calling this function
281 * (call mdp5_ctl_commit() with mdp_ctl_flush_mask_ctl() mask) 280 * (call mdp5_ctl_commit() with mdp_ctl_flush_mask_ctl() mask)
282 */ 281 */
283int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, int cursor_id, bool enable) 282int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline,
283 int cursor_id, bool enable)
284{ 284{
285 struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm; 285 struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
286 unsigned long flags; 286 unsigned long flags;
287 u32 blend_cfg; 287 u32 blend_cfg;
288 int lm = ctl->lm; 288 struct mdp5_hw_mixer *mixer = pipeline->mixer;
289
290 if (unlikely(WARN_ON(!mixer))) {
291 dev_err(ctl_mgr->dev->dev, "CTL %d cannot find LM",
292 ctl->id);
293 return -EINVAL;
294 }
289 295
290 if (unlikely(WARN_ON(lm < 0))) { 296 if (pipeline->r_mixer) {
291 dev_err(ctl_mgr->dev->dev, "CTL %d cannot find LM: %d", 297 dev_err(ctl_mgr->dev->dev, "unsupported configuration");
292 ctl->id, lm);
293 return -EINVAL; 298 return -EINVAL;
294 } 299 }
295 300
296 spin_lock_irqsave(&ctl->hw_lock, flags); 301 spin_lock_irqsave(&ctl->hw_lock, flags);
297 302
298 blend_cfg = ctl_read(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, lm)); 303 blend_cfg = ctl_read(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, mixer->lm));
299 304
300 if (enable) 305 if (enable)
301 blend_cfg |= MDP5_CTL_LAYER_REG_CURSOR_OUT; 306 blend_cfg |= MDP5_CTL_LAYER_REG_CURSOR_OUT;
302 else 307 else
303 blend_cfg &= ~MDP5_CTL_LAYER_REG_CURSOR_OUT; 308 blend_cfg &= ~MDP5_CTL_LAYER_REG_CURSOR_OUT;
304 309
305 ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, lm), blend_cfg); 310 ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, mixer->lm), blend_cfg);
306 ctl->cursor_on = enable; 311 ctl->cursor_on = enable;
307 312
308 spin_unlock_irqrestore(&ctl->hw_lock, flags); 313 spin_unlock_irqrestore(&ctl->hw_lock, flags);
@@ -355,37 +360,88 @@ static u32 mdp_ctl_blend_ext_mask(enum mdp5_pipe pipe,
355 } 360 }
356} 361}
357 362
358int mdp5_ctl_blend(struct mdp5_ctl *ctl, enum mdp5_pipe *stage, u32 stage_cnt, 363static void mdp5_ctl_reset_blend_regs(struct mdp5_ctl *ctl)
359 u32 ctl_blend_op_flags) 364{
365 unsigned long flags;
366 struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
367 int i;
368
369 spin_lock_irqsave(&ctl->hw_lock, flags);
370
371 for (i = 0; i < ctl_mgr->nlm; i++) {
372 ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, i), 0x0);
373 ctl_write(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, i), 0x0);
374 }
375
376 spin_unlock_irqrestore(&ctl->hw_lock, flags);
377}
378
379#define PIPE_LEFT 0
380#define PIPE_RIGHT 1
381int mdp5_ctl_blend(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline,
382 enum mdp5_pipe stage[][MAX_PIPE_STAGE],
383 enum mdp5_pipe r_stage[][MAX_PIPE_STAGE],
384 u32 stage_cnt, u32 ctl_blend_op_flags)
360{ 385{
386 struct mdp5_hw_mixer *mixer = pipeline->mixer;
387 struct mdp5_hw_mixer *r_mixer = pipeline->r_mixer;
361 unsigned long flags; 388 unsigned long flags;
362 u32 blend_cfg = 0, blend_ext_cfg = 0; 389 u32 blend_cfg = 0, blend_ext_cfg = 0;
390 u32 r_blend_cfg = 0, r_blend_ext_cfg = 0;
363 int i, start_stage; 391 int i, start_stage;
364 392
393 mdp5_ctl_reset_blend_regs(ctl);
394
365 if (ctl_blend_op_flags & MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT) { 395 if (ctl_blend_op_flags & MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT) {
366 start_stage = STAGE0; 396 start_stage = STAGE0;
367 blend_cfg |= MDP5_CTL_LAYER_REG_BORDER_COLOR; 397 blend_cfg |= MDP5_CTL_LAYER_REG_BORDER_COLOR;
398 if (r_mixer)
399 r_blend_cfg |= MDP5_CTL_LAYER_REG_BORDER_COLOR;
368 } else { 400 } else {
369 start_stage = STAGE_BASE; 401 start_stage = STAGE_BASE;
370 } 402 }
371 403
372 for (i = start_stage; stage_cnt && i <= STAGE_MAX; i++) { 404 for (i = start_stage; stage_cnt && i <= STAGE_MAX; i++) {
373 blend_cfg |= mdp_ctl_blend_mask(stage[i], i); 405 blend_cfg |=
374 blend_ext_cfg |= mdp_ctl_blend_ext_mask(stage[i], i); 406 mdp_ctl_blend_mask(stage[i][PIPE_LEFT], i) |
407 mdp_ctl_blend_mask(stage[i][PIPE_RIGHT], i);
408 blend_ext_cfg |=
409 mdp_ctl_blend_ext_mask(stage[i][PIPE_LEFT], i) |
410 mdp_ctl_blend_ext_mask(stage[i][PIPE_RIGHT], i);
411 if (r_mixer) {
412 r_blend_cfg |=
413 mdp_ctl_blend_mask(r_stage[i][PIPE_LEFT], i) |
414 mdp_ctl_blend_mask(r_stage[i][PIPE_RIGHT], i);
415 r_blend_ext_cfg |=
416 mdp_ctl_blend_ext_mask(r_stage[i][PIPE_LEFT], i) |
417 mdp_ctl_blend_ext_mask(r_stage[i][PIPE_RIGHT], i);
418 }
375 } 419 }
376 420
377 spin_lock_irqsave(&ctl->hw_lock, flags); 421 spin_lock_irqsave(&ctl->hw_lock, flags);
378 if (ctl->cursor_on) 422 if (ctl->cursor_on)
379 blend_cfg |= MDP5_CTL_LAYER_REG_CURSOR_OUT; 423 blend_cfg |= MDP5_CTL_LAYER_REG_CURSOR_OUT;
380 424
381 ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, ctl->lm), blend_cfg); 425 ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, mixer->lm), blend_cfg);
382 ctl_write(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, ctl->lm), blend_ext_cfg); 426 ctl_write(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, mixer->lm),
427 blend_ext_cfg);
428 if (r_mixer) {
429 ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, r_mixer->lm),
430 r_blend_cfg);
431 ctl_write(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, r_mixer->lm),
432 r_blend_ext_cfg);
433 }
383 spin_unlock_irqrestore(&ctl->hw_lock, flags); 434 spin_unlock_irqrestore(&ctl->hw_lock, flags);
384 435
385 ctl->pending_ctl_trigger = mdp_ctl_flush_mask_lm(ctl->lm); 436 ctl->pending_ctl_trigger = mdp_ctl_flush_mask_lm(mixer->lm);
437 if (r_mixer)
438 ctl->pending_ctl_trigger |= mdp_ctl_flush_mask_lm(r_mixer->lm);
386 439
387 DBG("lm%d: blend config = 0x%08x. ext_cfg = 0x%08x", ctl->lm, 440 DBG("lm%d: blend config = 0x%08x. ext_cfg = 0x%08x", mixer->lm,
388 blend_cfg, blend_ext_cfg); 441 blend_cfg, blend_ext_cfg);
442 if (r_mixer)
443 DBG("lm%d: blend config = 0x%08x. ext_cfg = 0x%08x",
444 r_mixer->lm, r_blend_cfg, r_blend_ext_cfg);
389 445
390 return 0; 446 return 0;
391} 447}
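The accumulation loop above folds both pipe slots of every stage into one register value; because mdp_ctl_blend_mask() yields 0 for SSPP_NONE, empty slots cost nothing. A standalone model of the fold (the mask helper is a stand-in, not the real bit layout):

#include <stdint.h>

#define MAX_PIPE_STAGE 2
enum pipe { SSPP_NONE = 0, SSPP_VIG0, SSPP_RGB0 };  /* illustrative */

/* stand-in for mdp_ctl_blend_mask(): SSPP_NONE must contribute 0 */
static uint32_t blend_mask(enum pipe p, int stg)
{
	return p == SSPP_NONE ? 0 : (uint32_t)p << (stg * 4);
}

static uint32_t fold_stages(enum pipe stage[][MAX_PIPE_STAGE],
			    int start, int max)
{
	uint32_t cfg = 0;
	int i;

	for (i = start; i <= max; i++)
		cfg |= blend_mask(stage[i][0], i) |
		       blend_mask(stage[i][1], i);
	return cfg;
}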
@@ -443,7 +499,8 @@ u32 mdp_ctl_flush_mask_lm(int lm)
443 } 499 }
444} 500}
445 501
446static u32 fix_sw_flush(struct mdp5_ctl *ctl, u32 flush_mask) 502static u32 fix_sw_flush(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline,
503 u32 flush_mask)
447{ 504{
448 struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm; 505 struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
449 u32 sw_mask = 0; 506 u32 sw_mask = 0;
@@ -452,7 +509,7 @@ static u32 fix_sw_flush(struct mdp5_ctl *ctl, u32 flush_mask)
452 509
453 /* for some targets, cursor bit is the same as LM bit */ 510 /* for some targets, cursor bit is the same as LM bit */
454 if (BIT_NEEDS_SW_FIX(MDP5_CTL_FLUSH_CURSOR_0)) 511 if (BIT_NEEDS_SW_FIX(MDP5_CTL_FLUSH_CURSOR_0))
455 sw_mask |= mdp_ctl_flush_mask_lm(ctl->lm); 512 sw_mask |= mdp_ctl_flush_mask_lm(pipeline->mixer->lm);
456 513
457 return sw_mask; 514 return sw_mask;
458} 515}
@@ -498,25 +555,26 @@ static void fix_for_single_flush(struct mdp5_ctl *ctl, u32 *flush_mask,
498 * 555 *
499 * Return H/W flushed bit mask. 556 * Return H/W flushed bit mask.
500 */ 557 */
501u32 mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask) 558u32 mdp5_ctl_commit(struct mdp5_ctl *ctl,
559 struct mdp5_pipeline *pipeline,
560 u32 flush_mask)
502{ 561{
503 struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm; 562 struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
504 struct op_mode *pipeline = &ctl->pipeline;
505 unsigned long flags; 563 unsigned long flags;
506 u32 flush_id = ctl->id; 564 u32 flush_id = ctl->id;
507 u32 curr_ctl_flush_mask; 565 u32 curr_ctl_flush_mask;
508 566
509 pipeline->start_mask &= ~flush_mask; 567 ctl->start_mask &= ~flush_mask;
510 568
511 VERB("flush_mask=%x, start_mask=%x, trigger=%x", flush_mask, 569 VERB("flush_mask=%x, start_mask=%x, trigger=%x", flush_mask,
512 pipeline->start_mask, ctl->pending_ctl_trigger); 570 ctl->start_mask, ctl->pending_ctl_trigger);
513 571
514 if (ctl->pending_ctl_trigger & flush_mask) { 572 if (ctl->pending_ctl_trigger & flush_mask) {
515 flush_mask |= MDP5_CTL_FLUSH_CTL; 573 flush_mask |= MDP5_CTL_FLUSH_CTL;
516 ctl->pending_ctl_trigger = 0; 574 ctl->pending_ctl_trigger = 0;
517 } 575 }
518 576
519 flush_mask |= fix_sw_flush(ctl, flush_mask); 577 flush_mask |= fix_sw_flush(ctl, pipeline, flush_mask);
520 578
521 flush_mask &= ctl_mgr->flush_hw_mask; 579 flush_mask &= ctl_mgr->flush_hw_mask;
522 580
@@ -530,9 +588,9 @@ u32 mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask)
530 spin_unlock_irqrestore(&ctl->hw_lock, flags); 588 spin_unlock_irqrestore(&ctl->hw_lock, flags);
531 } 589 }
532 590
533 if (start_signal_needed(ctl)) { 591 if (start_signal_needed(ctl, pipeline)) {
534 send_start_signal(ctl); 592 send_start_signal(ctl);
535 refill_start_mask(ctl); 593 refill_start_mask(ctl, pipeline);
536 } 594 }
537 595
538 return curr_ctl_flush_mask; 596 return curr_ctl_flush_mask;
@@ -619,8 +677,6 @@ struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctl_mgr,
619 677
620found: 678found:
621 ctl = &ctl_mgr->ctls[c]; 679 ctl = &ctl_mgr->ctls[c];
622 ctl->pipeline.intf.num = intf_num;
623 ctl->lm = -1;
624 ctl->status |= CTL_STAT_BUSY; 680 ctl->status |= CTL_STAT_BUSY;
625 ctl->pending_ctl_trigger = 0; 681 ctl->pending_ctl_trigger = 0;
626 DBG("CTL %d allocated", ctl->id); 682 DBG("CTL %d allocated", ctl->id);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h
index fda00d33e4db..b63120388dc6 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h
@@ -37,13 +37,17 @@ struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctlm, int intf_num);
37int mdp5_ctl_get_ctl_id(struct mdp5_ctl *ctl); 37int mdp5_ctl_get_ctl_id(struct mdp5_ctl *ctl);
38 38
39struct mdp5_interface; 39struct mdp5_interface;
40int mdp5_ctl_set_pipeline(struct mdp5_ctl *ctl, struct mdp5_interface *intf, 40struct mdp5_pipeline;
41 int lm); 41int mdp5_ctl_set_pipeline(struct mdp5_ctl *ctl, struct mdp5_pipeline *p);
42int mdp5_ctl_set_encoder_state(struct mdp5_ctl *ctl, bool enabled); 42int mdp5_ctl_set_encoder_state(struct mdp5_ctl *ctl, struct mdp5_pipeline *p,
43 bool enabled);
43 44
44int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, int cursor_id, bool enable); 45int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline,
46 int cursor_id, bool enable);
45int mdp5_ctl_pair(struct mdp5_ctl *ctlx, struct mdp5_ctl *ctly, bool enable); 47int mdp5_ctl_pair(struct mdp5_ctl *ctlx, struct mdp5_ctl *ctly, bool enable);
46 48
49#define MAX_PIPE_STAGE 2
50
47/* 51/*
48 * mdp5_ctl_blend() - Blend multiple layers on a Layer Mixer (LM) 52 * mdp5_ctl_blend() - Blend multiple layers on a Layer Mixer (LM)
49 * 53 *
@@ -56,8 +60,10 @@ int mdp5_ctl_pair(struct mdp5_ctl *ctlx, struct mdp5_ctl *ctly, bool enable);
56 * (call mdp5_ctl_commit() with mdp_ctl_flush_mask_ctl() mask) 60 * (call mdp5_ctl_commit() with mdp_ctl_flush_mask_ctl() mask)
57 */ 61 */
58#define MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT BIT(0) 62#define MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT BIT(0)
59int mdp5_ctl_blend(struct mdp5_ctl *ctl, enum mdp5_pipe *stage, u32 stage_cnt, 63int mdp5_ctl_blend(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline,
60 u32 ctl_blend_op_flags); 64 enum mdp5_pipe stage[][MAX_PIPE_STAGE],
65 enum mdp5_pipe r_stage[][MAX_PIPE_STAGE],
66 u32 stage_cnt, u32 ctl_blend_op_flags);
61 67
62/** 68/**
63 * mdp_ctl_flush_mask...() - Register FLUSH masks 69 * mdp_ctl_flush_mask...() - Register FLUSH masks
@@ -71,7 +77,8 @@ u32 mdp_ctl_flush_mask_cursor(int cursor_id);
71u32 mdp_ctl_flush_mask_encoder(struct mdp5_interface *intf); 77u32 mdp_ctl_flush_mask_encoder(struct mdp5_interface *intf);
72 78
73/* @flush_mask: see CTL flush masks definitions below */ 79/* @flush_mask: see CTL flush masks definitions below */
74u32 mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask); 80u32 mdp5_ctl_commit(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline,
81 u32 flush_mask);
75u32 mdp5_ctl_get_commit_status(struct mdp5_ctl *ctl); 82u32 mdp5_ctl_get_commit_status(struct mdp5_ctl *ctl);
76 83
77 84
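A call-site sketch against the widened prototypes above, mirroring what blend_setup() in mdp5_crtc.c now does (variable names and the border-out condition follow that function; this is not additional driver code):

	enum mdp5_pipe stage[STAGE_MAX + 1][MAX_PIPE_STAGE]   = { { SSPP_NONE } };
	enum mdp5_pipe r_stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { { SSPP_NONE } };
	u32 flags = 0;

	/* border color becomes the base layer when STAGE_BASE is empty */
	if (!pstates[STAGE_BASE])
		flags |= MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT;

	mdp5_ctl_blend(ctl, pipeline, stage, r_stage, plane_cnt, flags);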
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
index 80fa482ae8ed..c2ab0f033031 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
@@ -109,7 +109,7 @@ static void mdp5_vid_encoder_mode_set(struct drm_encoder *encoder,
109 struct mdp5_kms *mdp5_kms = get_kms(encoder); 109 struct mdp5_kms *mdp5_kms = get_kms(encoder);
110 struct drm_device *dev = encoder->dev; 110 struct drm_device *dev = encoder->dev;
111 struct drm_connector *connector; 111 struct drm_connector *connector;
112 int intf = mdp5_encoder->intf.num; 112 int intf = mdp5_encoder->intf->num;
113 uint32_t dtv_hsync_skew, vsync_period, vsync_len, ctrl_pol; 113 uint32_t dtv_hsync_skew, vsync_period, vsync_len, ctrl_pol;
114 uint32_t display_v_start, display_v_end; 114 uint32_t display_v_start, display_v_end;
115 uint32_t hsync_start_x, hsync_end_x; 115 uint32_t hsync_start_x, hsync_end_x;
@@ -130,7 +130,7 @@ static void mdp5_vid_encoder_mode_set(struct drm_encoder *encoder,
130 ctrl_pol = 0; 130 ctrl_pol = 0;
131 131
132 /* DSI controller cannot handle active-low sync signals. */ 132 /* DSI controller cannot handle active-low sync signals. */
133 if (mdp5_encoder->intf.type != INTF_DSI) { 133 if (mdp5_encoder->intf->type != INTF_DSI) {
134 if (mode->flags & DRM_MODE_FLAG_NHSYNC) 134 if (mode->flags & DRM_MODE_FLAG_NHSYNC)
135 ctrl_pol |= MDP5_INTF_POLARITY_CTL_HSYNC_LOW; 135 ctrl_pol |= MDP5_INTF_POLARITY_CTL_HSYNC_LOW;
136 if (mode->flags & DRM_MODE_FLAG_NVSYNC) 136 if (mode->flags & DRM_MODE_FLAG_NVSYNC)
@@ -175,7 +175,7 @@ static void mdp5_vid_encoder_mode_set(struct drm_encoder *encoder,
175 * DISPLAY_V_START = (VBP * HCYCLE) + HBP 175 * DISPLAY_V_START = (VBP * HCYCLE) + HBP
176 * DISPLAY_V_END = (VBP + VACTIVE) * HCYCLE - 1 - HFP 176 * DISPLAY_V_END = (VBP + VACTIVE) * HCYCLE - 1 - HFP
177 */ 177 */
178 if (mdp5_encoder->intf.type == INTF_eDP) { 178 if (mdp5_encoder->intf->type == INTF_eDP) {
179 display_v_start += mode->htotal - mode->hsync_start; 179 display_v_start += mode->htotal - mode->hsync_start;
180 display_v_end -= mode->hsync_start - mode->hdisplay; 180 display_v_end -= mode->hsync_start - mode->hdisplay;
181 } 181 }
@@ -206,8 +206,7 @@ static void mdp5_vid_encoder_mode_set(struct drm_encoder *encoder,
206 206
207 spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags); 207 spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags);
208 208
209 mdp5_crtc_set_pipeline(encoder->crtc, &mdp5_encoder->intf, 209 mdp5_crtc_set_pipeline(encoder->crtc);
210 mdp5_encoder->ctl);
211} 210}
212 211
213static void mdp5_vid_encoder_disable(struct drm_encoder *encoder) 212static void mdp5_vid_encoder_disable(struct drm_encoder *encoder)
@@ -215,20 +214,21 @@ static void mdp5_vid_encoder_disable(struct drm_encoder *encoder)
215 struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder); 214 struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
216 struct mdp5_kms *mdp5_kms = get_kms(encoder); 215 struct mdp5_kms *mdp5_kms = get_kms(encoder);
217 struct mdp5_ctl *ctl = mdp5_encoder->ctl; 216 struct mdp5_ctl *ctl = mdp5_encoder->ctl;
218 int lm = mdp5_crtc_get_lm(encoder->crtc); 217 struct mdp5_pipeline *pipeline = mdp5_crtc_get_pipeline(encoder->crtc);
219 struct mdp5_interface *intf = &mdp5_encoder->intf; 218 struct mdp5_hw_mixer *mixer = mdp5_crtc_get_mixer(encoder->crtc);
220 int intfn = mdp5_encoder->intf.num; 219 struct mdp5_interface *intf = mdp5_encoder->intf;
220 int intfn = mdp5_encoder->intf->num;
221 unsigned long flags; 221 unsigned long flags;
222 222
223 if (WARN_ON(!mdp5_encoder->enabled)) 223 if (WARN_ON(!mdp5_encoder->enabled))
224 return; 224 return;
225 225
226 mdp5_ctl_set_encoder_state(ctl, false); 226 mdp5_ctl_set_encoder_state(ctl, pipeline, false);
227 227
228 spin_lock_irqsave(&mdp5_encoder->intf_lock, flags); 228 spin_lock_irqsave(&mdp5_encoder->intf_lock, flags);
229 mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intfn), 0); 229 mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intfn), 0);
230 spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags); 230 spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags);
231 mdp5_ctl_commit(ctl, mdp_ctl_flush_mask_encoder(intf)); 231 mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_encoder(intf));
232 232
233 /* 233 /*
234 * Wait for a vsync so we know the ENABLE=0 latched before 234 * Wait for a vsync so we know the ENABLE=0 latched before
@@ -238,7 +238,7 @@ static void mdp5_vid_encoder_disable(struct drm_encoder *encoder)
238 * the settings changes for the new modeset (like new 238 * the settings changes for the new modeset (like new
239 * scanout buffer) don't latch properly.. 239 * scanout buffer) don't latch properly..
240 */ 240 */
241 mdp_irq_wait(&mdp5_kms->base, intf2vblank(lm, intf)); 241 mdp_irq_wait(&mdp5_kms->base, intf2vblank(mixer, intf));
242 242
243 bs_set(mdp5_encoder, 0); 243 bs_set(mdp5_encoder, 0);
244 244
@@ -250,8 +250,9 @@ static void mdp5_vid_encoder_enable(struct drm_encoder *encoder)
250 struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder); 250 struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
251 struct mdp5_kms *mdp5_kms = get_kms(encoder); 251 struct mdp5_kms *mdp5_kms = get_kms(encoder);
252 struct mdp5_ctl *ctl = mdp5_encoder->ctl; 252 struct mdp5_ctl *ctl = mdp5_encoder->ctl;
253 struct mdp5_interface *intf = &mdp5_encoder->intf; 253 struct mdp5_interface *intf = mdp5_encoder->intf;
254 int intfn = mdp5_encoder->intf.num; 254 struct mdp5_pipeline *pipeline = mdp5_crtc_get_pipeline(encoder->crtc);
255 int intfn = intf->num;
255 unsigned long flags; 256 unsigned long flags;
256 257
257 if (WARN_ON(mdp5_encoder->enabled)) 258 if (WARN_ON(mdp5_encoder->enabled))
@@ -261,9 +262,9 @@ static void mdp5_vid_encoder_enable(struct drm_encoder *encoder)
261 spin_lock_irqsave(&mdp5_encoder->intf_lock, flags); 262 spin_lock_irqsave(&mdp5_encoder->intf_lock, flags);
262 mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intfn), 1); 263 mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intfn), 1);
263 spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags); 264 spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags);
264 mdp5_ctl_commit(ctl, mdp_ctl_flush_mask_encoder(intf)); 265 mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_encoder(intf));
265 266
266 mdp5_ctl_set_encoder_state(ctl, true); 267 mdp5_ctl_set_encoder_state(ctl, pipeline, true);
267 268
268 mdp5_encoder->enabled = true; 269 mdp5_encoder->enabled = true;
269} 270}
@@ -273,7 +274,7 @@ static void mdp5_encoder_mode_set(struct drm_encoder *encoder,
273 struct drm_display_mode *adjusted_mode) 274 struct drm_display_mode *adjusted_mode)
274{ 275{
275 struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder); 276 struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
276 struct mdp5_interface *intf = &mdp5_encoder->intf; 277 struct mdp5_interface *intf = mdp5_encoder->intf;
277 278
278 if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND) 279 if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)
279 mdp5_cmd_encoder_mode_set(encoder, mode, adjusted_mode); 280 mdp5_cmd_encoder_mode_set(encoder, mode, adjusted_mode);
@@ -284,7 +285,7 @@ static void mdp5_encoder_mode_set(struct drm_encoder *encoder,
284static void mdp5_encoder_disable(struct drm_encoder *encoder) 285static void mdp5_encoder_disable(struct drm_encoder *encoder)
285{ 286{
286 struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder); 287 struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
287 struct mdp5_interface *intf = &mdp5_encoder->intf; 288 struct mdp5_interface *intf = mdp5_encoder->intf;
288 289
289 if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND) 290 if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)
290 mdp5_cmd_encoder_disable(encoder); 291 mdp5_cmd_encoder_disable(encoder);
@@ -295,7 +296,7 @@ static void mdp5_encoder_disable(struct drm_encoder *encoder)
295static void mdp5_encoder_enable(struct drm_encoder *encoder) 296static void mdp5_encoder_enable(struct drm_encoder *encoder)
296{ 297{
297 struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder); 298 struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
298 struct mdp5_interface *intf = &mdp5_encoder->intf; 299 struct mdp5_interface *intf = mdp5_encoder->intf;
299 300
300 if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND) 301 if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)
301 mdp5_cmd_encoder_disable(encoder); 302 mdp5_cmd_encoder_disable(encoder);
@@ -303,17 +304,33 @@ static void mdp5_encoder_enable(struct drm_encoder *encoder)
303 mdp5_vid_encoder_enable(encoder); 304 mdp5_vid_encoder_enable(encoder);
304} 305}
305 306
307static int mdp5_encoder_atomic_check(struct drm_encoder *encoder,
308 struct drm_crtc_state *crtc_state,
309 struct drm_connector_state *conn_state)
310{
311 struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
312 struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc_state);
313 struct mdp5_interface *intf = mdp5_encoder->intf;
314 struct mdp5_ctl *ctl = mdp5_encoder->ctl;
315
316 mdp5_cstate->ctl = ctl;
317 mdp5_cstate->pipeline.intf = intf;
318
319 return 0;
320}
321
306static const struct drm_encoder_helper_funcs mdp5_encoder_helper_funcs = { 322static const struct drm_encoder_helper_funcs mdp5_encoder_helper_funcs = {
307 .mode_set = mdp5_encoder_mode_set, 323 .mode_set = mdp5_encoder_mode_set,
308 .disable = mdp5_encoder_disable, 324 .disable = mdp5_encoder_disable,
309 .enable = mdp5_encoder_enable, 325 .enable = mdp5_encoder_enable,
326 .atomic_check = mdp5_encoder_atomic_check,
310}; 327};
311 328
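Note: the new atomic_check hook stores the encoder's ctl and interface in the subclassed CRTC state, so CRTC code can read them out of the atomic state instead of having them pushed in via a setter. A minimal consumer sketch (the helper name is illustrative, not part of this patch):

	/* Sketch: reading back what mdp5_encoder_atomic_check() stored.
	 * example_get_intf() is hypothetical.
	 */
	static struct mdp5_interface *example_get_intf(struct drm_crtc_state *cstate)
	{
		struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(cstate);

		return mdp5_cstate->pipeline.intf;
	}
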
312int mdp5_encoder_get_linecount(struct drm_encoder *encoder) 329int mdp5_encoder_get_linecount(struct drm_encoder *encoder)
313{ 330{
314 struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder); 331 struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
315 struct mdp5_kms *mdp5_kms = get_kms(encoder); 332 struct mdp5_kms *mdp5_kms = get_kms(encoder);
316 int intf = mdp5_encoder->intf.num; 333 int intf = mdp5_encoder->intf->num;
317 334
318 return mdp5_read(mdp5_kms, REG_MDP5_INTF_LINE_COUNT(intf)); 335 return mdp5_read(mdp5_kms, REG_MDP5_INTF_LINE_COUNT(intf));
319} 336}
@@ -322,7 +339,7 @@ u32 mdp5_encoder_get_framecount(struct drm_encoder *encoder)
322{ 339{
323 struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder); 340 struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
324 struct mdp5_kms *mdp5_kms = get_kms(encoder); 341 struct mdp5_kms *mdp5_kms = get_kms(encoder);
325 int intf = mdp5_encoder->intf.num; 342 int intf = mdp5_encoder->intf->num;
326 343
327 return mdp5_read(mdp5_kms, REG_MDP5_INTF_FRAME_COUNT(intf)); 344 return mdp5_read(mdp5_kms, REG_MDP5_INTF_FRAME_COUNT(intf));
328} 345}
@@ -340,7 +357,7 @@ int mdp5_vid_encoder_set_split_display(struct drm_encoder *encoder,
340 return -EINVAL; 357 return -EINVAL;
341 358
342 mdp5_kms = get_kms(encoder); 359 mdp5_kms = get_kms(encoder);
343 intf_num = mdp5_encoder->intf.num; 360 intf_num = mdp5_encoder->intf->num;
344 361
345 /* Switch slave encoder's TimingGen Sync mode, 362 /* Switch slave encoder's TimingGen Sync mode,
346 * to use the master's enable signal for the slave encoder. 363 * to use the master's enable signal for the slave encoder.
@@ -369,7 +386,7 @@ int mdp5_vid_encoder_set_split_display(struct drm_encoder *encoder,
369void mdp5_encoder_set_intf_mode(struct drm_encoder *encoder, bool cmd_mode) 386void mdp5_encoder_set_intf_mode(struct drm_encoder *encoder, bool cmd_mode)
370{ 387{
371 struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder); 388 struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
372 struct mdp5_interface *intf = &mdp5_encoder->intf; 389 struct mdp5_interface *intf = mdp5_encoder->intf;
373 390
374 /* TODO: Expand this to set writeback modes too */ 391 /* TODO: Expand this to set writeback modes too */
375 if (cmd_mode) { 392 if (cmd_mode) {
@@ -385,7 +402,8 @@ void mdp5_encoder_set_intf_mode(struct drm_encoder *encoder, bool cmd_mode)
385 402
386/* initialize encoder */ 403/* initialize encoder */
387struct drm_encoder *mdp5_encoder_init(struct drm_device *dev, 404struct drm_encoder *mdp5_encoder_init(struct drm_device *dev,
388 struct mdp5_interface *intf, struct mdp5_ctl *ctl) 405 struct mdp5_interface *intf,
406 struct mdp5_ctl *ctl)
389{ 407{
390 struct drm_encoder *encoder = NULL; 408 struct drm_encoder *encoder = NULL;
391 struct mdp5_encoder *mdp5_encoder; 409 struct mdp5_encoder *mdp5_encoder;
@@ -399,9 +417,9 @@ struct drm_encoder *mdp5_encoder_init(struct drm_device *dev,
399 goto fail; 417 goto fail;
400 } 418 }
401 419
402 memcpy(&mdp5_encoder->intf, intf, sizeof(mdp5_encoder->intf));
403 encoder = &mdp5_encoder->base; 420 encoder = &mdp5_encoder->base;
404 mdp5_encoder->ctl = ctl; 421 mdp5_encoder->ctl = ctl;
422 mdp5_encoder->intf = intf;
405 423
406 spin_lock_init(&mdp5_encoder->intf_lock); 424 spin_lock_init(&mdp5_encoder->intf_lock);
407 425
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
index 41ccd2a15d3c..d3d6b4cae1e6 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -93,6 +93,7 @@ struct mdp5_state *mdp5_get_state(struct drm_atomic_state *s)
93 93
94 /* Copy state: */ 94 /* Copy state: */
95 new_state->hwpipe = mdp5_kms->state->hwpipe; 95 new_state->hwpipe = mdp5_kms->state->hwpipe;
96 new_state->hwmixer = mdp5_kms->state->hwmixer;
96 if (mdp5_kms->smp) 97 if (mdp5_kms->smp)
97 new_state->smp = mdp5_kms->state->smp; 98 new_state->smp = mdp5_kms->state->smp;
98 99
@@ -165,13 +166,16 @@ static void mdp5_kms_destroy(struct msm_kms *kms)
165 struct msm_gem_address_space *aspace = mdp5_kms->aspace; 166 struct msm_gem_address_space *aspace = mdp5_kms->aspace;
166 int i; 167 int i;
167 168
169 for (i = 0; i < mdp5_kms->num_hwmixers; i++)
170 mdp5_mixer_destroy(mdp5_kms->hwmixers[i]);
171
168 for (i = 0; i < mdp5_kms->num_hwpipes; i++) 172 for (i = 0; i < mdp5_kms->num_hwpipes; i++)
169 mdp5_pipe_destroy(mdp5_kms->hwpipes[i]); 173 mdp5_pipe_destroy(mdp5_kms->hwpipes[i]);
170 174
171 if (aspace) { 175 if (aspace) {
172 aspace->mmu->funcs->detach(aspace->mmu, 176 aspace->mmu->funcs->detach(aspace->mmu,
173 iommu_ports, ARRAY_SIZE(iommu_ports)); 177 iommu_ports, ARRAY_SIZE(iommu_ports));
174 msm_gem_address_space_destroy(aspace); 178 msm_gem_address_space_put(aspace);
175 } 179 }
176} 180}
177 181
@@ -268,19 +272,14 @@ int mdp5_enable(struct mdp5_kms *mdp5_kms)
268} 272}
269 273
270static struct drm_encoder *construct_encoder(struct mdp5_kms *mdp5_kms, 274static struct drm_encoder *construct_encoder(struct mdp5_kms *mdp5_kms,
271 enum mdp5_intf_type intf_type, int intf_num, 275 struct mdp5_interface *intf,
272 struct mdp5_ctl *ctl) 276 struct mdp5_ctl *ctl)
273{ 277{
274 struct drm_device *dev = mdp5_kms->dev; 278 struct drm_device *dev = mdp5_kms->dev;
275 struct msm_drm_private *priv = dev->dev_private; 279 struct msm_drm_private *priv = dev->dev_private;
276 struct drm_encoder *encoder; 280 struct drm_encoder *encoder;
277 struct mdp5_interface intf = {
278 .num = intf_num,
279 .type = intf_type,
280 .mode = MDP5_INTF_MODE_NONE,
281 };
282 281
283 encoder = mdp5_encoder_init(dev, &intf, ctl); 282 encoder = mdp5_encoder_init(dev, intf, ctl);
284 if (IS_ERR(encoder)) { 283 if (IS_ERR(encoder)) {
285 dev_err(dev->dev, "failed to construct encoder\n"); 284 dev_err(dev->dev, "failed to construct encoder\n");
286 return encoder; 285 return encoder;
@@ -309,32 +308,28 @@ static int get_dsi_id_from_intf(const struct mdp5_cfg_hw *hw_cfg, int intf_num)
309 return -EINVAL; 308 return -EINVAL;
310} 309}
311 310
312static int modeset_init_intf(struct mdp5_kms *mdp5_kms, int intf_num) 311static int modeset_init_intf(struct mdp5_kms *mdp5_kms,
312 struct mdp5_interface *intf)
313{ 313{
314 struct drm_device *dev = mdp5_kms->dev; 314 struct drm_device *dev = mdp5_kms->dev;
315 struct msm_drm_private *priv = dev->dev_private; 315 struct msm_drm_private *priv = dev->dev_private;
316 const struct mdp5_cfg_hw *hw_cfg =
317 mdp5_cfg_get_hw_config(mdp5_kms->cfg);
318 enum mdp5_intf_type intf_type = hw_cfg->intf.connect[intf_num];
319 struct mdp5_ctl_manager *ctlm = mdp5_kms->ctlm; 316 struct mdp5_ctl_manager *ctlm = mdp5_kms->ctlm;
320 struct mdp5_ctl *ctl; 317 struct mdp5_ctl *ctl;
321 struct drm_encoder *encoder; 318 struct drm_encoder *encoder;
322 int ret = 0; 319 int ret = 0;
323 320
324 switch (intf_type) { 321 switch (intf->type) {
325 case INTF_DISABLED:
326 break;
327 case INTF_eDP: 322 case INTF_eDP:
328 if (!priv->edp) 323 if (!priv->edp)
329 break; 324 break;
330 325
331 ctl = mdp5_ctlm_request(ctlm, intf_num); 326 ctl = mdp5_ctlm_request(ctlm, intf->num);
332 if (!ctl) { 327 if (!ctl) {
333 ret = -EINVAL; 328 ret = -EINVAL;
334 break; 329 break;
335 } 330 }
336 331
337 encoder = construct_encoder(mdp5_kms, INTF_eDP, intf_num, ctl); 332 encoder = construct_encoder(mdp5_kms, intf, ctl);
338 if (IS_ERR(encoder)) { 333 if (IS_ERR(encoder)) {
339 ret = PTR_ERR(encoder); 334 ret = PTR_ERR(encoder);
340 break; 335 break;
@@ -346,13 +341,13 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms, int intf_num)
346 if (!priv->hdmi) 341 if (!priv->hdmi)
347 break; 342 break;
348 343
349 ctl = mdp5_ctlm_request(ctlm, intf_num); 344 ctl = mdp5_ctlm_request(ctlm, intf->num);
350 if (!ctl) { 345 if (!ctl) {
351 ret = -EINVAL; 346 ret = -EINVAL;
352 break; 347 break;
353 } 348 }
354 349
355 encoder = construct_encoder(mdp5_kms, INTF_HDMI, intf_num, ctl); 350 encoder = construct_encoder(mdp5_kms, intf, ctl);
356 if (IS_ERR(encoder)) { 351 if (IS_ERR(encoder)) {
357 ret = PTR_ERR(encoder); 352 ret = PTR_ERR(encoder);
358 break; 353 break;
@@ -362,11 +357,13 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms, int intf_num)
362 break; 357 break;
363 case INTF_DSI: 358 case INTF_DSI:
364 { 359 {
365 int dsi_id = get_dsi_id_from_intf(hw_cfg, intf_num); 360 const struct mdp5_cfg_hw *hw_cfg =
361 mdp5_cfg_get_hw_config(mdp5_kms->cfg);
362 int dsi_id = get_dsi_id_from_intf(hw_cfg, intf->num);
366 363
367 if ((dsi_id >= ARRAY_SIZE(priv->dsi)) || (dsi_id < 0)) { 364 if ((dsi_id >= ARRAY_SIZE(priv->dsi)) || (dsi_id < 0)) {
368 dev_err(dev->dev, "failed to find dsi from intf %d\n", 365 dev_err(dev->dev, "failed to find dsi from intf %d\n",
369 intf_num); 366 intf->num);
370 ret = -EINVAL; 367 ret = -EINVAL;
371 break; 368 break;
372 } 369 }
@@ -374,13 +371,13 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms, int intf_num)
374 if (!priv->dsi[dsi_id]) 371 if (!priv->dsi[dsi_id])
375 break; 372 break;
376 373
377 ctl = mdp5_ctlm_request(ctlm, intf_num); 374 ctl = mdp5_ctlm_request(ctlm, intf->num);
378 if (!ctl) { 375 if (!ctl) {
379 ret = -EINVAL; 376 ret = -EINVAL;
380 break; 377 break;
381 } 378 }
382 379
383 encoder = construct_encoder(mdp5_kms, INTF_DSI, intf_num, ctl); 380 encoder = construct_encoder(mdp5_kms, intf, ctl);
384 if (IS_ERR(encoder)) { 381 if (IS_ERR(encoder)) {
385 ret = PTR_ERR(encoder); 382 ret = PTR_ERR(encoder);
386 break; 383 break;
@@ -390,7 +387,7 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms, int intf_num)
390 break; 387 break;
391 } 388 }
392 default: 389 default:
393 dev_err(dev->dev, "unknown intf: %d\n", intf_type); 390 dev_err(dev->dev, "unknown intf: %d\n", intf->type);
394 ret = -EINVAL; 391 ret = -EINVAL;
395 break; 392 break;
396 } 393 }
@@ -414,8 +411,8 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
414 * Construct encoders and modeset initialize connector devices 411 * Construct encoders and modeset initialize connector devices
415 * for each external display interface. 412 * for each external display interface.
416 */ 413 */
417 for (i = 0; i < ARRAY_SIZE(hw_cfg->intf.connect); i++) { 414 for (i = 0; i < mdp5_kms->num_intfs; i++) {
418 ret = modeset_init_intf(mdp5_kms, i); 415 ret = modeset_init_intf(mdp5_kms, mdp5_kms->intfs[i]);
419 if (ret) 416 if (ret)
420 goto fail; 417 goto fail;
421 } 418 }
@@ -425,7 +422,7 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
425 * the MDP5 interfaces) than the number of layer mixers present in HW, 422 * the MDP5 interfaces) than the number of layer mixers present in HW,
426 * but let's be safe here anyway 423 * but let's be safe here anyway
427 */ 424 */
428 num_crtcs = min(priv->num_encoders, mdp5_cfg->lm.count); 425 num_crtcs = min(priv->num_encoders, mdp5_kms->num_hwmixers);
429 426
430 /* 427 /*
431 * Construct planes equaling the number of hw pipes, and CRTCs for the 428 * Construct planes equaling the number of hw pipes, and CRTCs for the
@@ -744,6 +741,7 @@ fail:
744static void mdp5_destroy(struct platform_device *pdev) 741static void mdp5_destroy(struct platform_device *pdev)
745{ 742{
746 struct mdp5_kms *mdp5_kms = platform_get_drvdata(pdev); 743 struct mdp5_kms *mdp5_kms = platform_get_drvdata(pdev);
744 int i;
747 745
748 if (mdp5_kms->ctlm) 746 if (mdp5_kms->ctlm)
749 mdp5_ctlm_destroy(mdp5_kms->ctlm); 747 mdp5_ctlm_destroy(mdp5_kms->ctlm);
@@ -752,6 +750,9 @@ static void mdp5_destroy(struct platform_device *pdev)
752 if (mdp5_kms->cfg) 750 if (mdp5_kms->cfg)
753 mdp5_cfg_destroy(mdp5_kms->cfg); 751 mdp5_cfg_destroy(mdp5_kms->cfg);
754 752
753 for (i = 0; i < mdp5_kms->num_intfs; i++)
754 kfree(mdp5_kms->intfs[i]);
755
755 if (mdp5_kms->rpm_enabled) 756 if (mdp5_kms->rpm_enabled)
756 pm_runtime_disable(&pdev->dev); 757 pm_runtime_disable(&pdev->dev);
757 758
@@ -829,6 +830,64 @@ static int hwpipe_init(struct mdp5_kms *mdp5_kms)
829 return 0; 830 return 0;
830} 831}
831 832
833static int hwmixer_init(struct mdp5_kms *mdp5_kms)
834{
835 struct drm_device *dev = mdp5_kms->dev;
836 const struct mdp5_cfg_hw *hw_cfg;
837 int i, ret;
838
839 hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
840
841 for (i = 0; i < hw_cfg->lm.count; i++) {
842 struct mdp5_hw_mixer *mixer;
843
844 mixer = mdp5_mixer_init(&hw_cfg->lm.instances[i]);
845 if (IS_ERR(mixer)) {
846 ret = PTR_ERR(mixer);
847 dev_err(dev->dev, "failed to construct LM%d (%d)\n",
848 i, ret);
849 return ret;
850 }
851
852 mixer->idx = mdp5_kms->num_hwmixers;
853 mdp5_kms->hwmixers[mdp5_kms->num_hwmixers++] = mixer;
854 }
855
856 return 0;
857}
858
859static int interface_init(struct mdp5_kms *mdp5_kms)
860{
861 struct drm_device *dev = mdp5_kms->dev;
862 const struct mdp5_cfg_hw *hw_cfg;
863 const enum mdp5_intf_type *intf_types;
864 int i;
865
866 hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
867 intf_types = hw_cfg->intf.connect;
868
869 for (i = 0; i < ARRAY_SIZE(hw_cfg->intf.connect); i++) {
870 struct mdp5_interface *intf;
871
872 if (intf_types[i] == INTF_DISABLED)
873 continue;
874
875 intf = kzalloc(sizeof(*intf), GFP_KERNEL);
876 if (!intf) {
877 dev_err(dev->dev, "failed to construct INTF%d\n", i);
878 return -ENOMEM;
879 }
880
881 intf->num = i;
882 intf->type = intf_types[i];
883 intf->mode = MDP5_INTF_MODE_NONE;
884 intf->idx = mdp5_kms->num_intfs;
885 mdp5_kms->intfs[mdp5_kms->num_intfs++] = intf;
886 }
887
888 return 0;
889}
890
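To make the allocation loop concrete, a worked example with a hypothetical hw config (the connect table values are illustrative):

	/* Hypothetical hw config:
	 *   hw_cfg->intf.connect = { INTF_DISABLED, INTF_DSI, INTF_DSI, INTF_HDMI }
	 * interface_init() would then allocate three interfaces:
	 *   intfs[0] = { .idx = 0, .num = 1, .type = INTF_DSI  }
	 *   intfs[1] = { .idx = 1, .num = 2, .type = INTF_DSI  }
	 *   intfs[2] = { .idx = 2, .num = 3, .type = INTF_HDMI }
	 * idx is the dense index into mdp5_kms->intfs[], while num stays the hw
	 * interface number, so disabled interfaces leave no holes in the array.
	 */
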
832static int mdp5_init(struct platform_device *pdev, struct drm_device *dev) 891static int mdp5_init(struct platform_device *pdev, struct drm_device *dev)
833{ 892{
834 struct msm_drm_private *priv = dev->dev_private; 893 struct msm_drm_private *priv = dev->dev_private;
@@ -929,6 +988,14 @@ static int mdp5_init(struct platform_device *pdev, struct drm_device *dev)
929 if (ret) 988 if (ret)
930 goto fail; 989 goto fail;
931 990
991 ret = hwmixer_init(mdp5_kms);
992 if (ret)
993 goto fail;
994
995 ret = interface_init(mdp5_kms);
996 if (ret)
997 goto fail;
998
932 /* set uninit-ed kms */ 999 /* set uninit-ed kms */
933 priv->kms = &mdp5_kms->base.base; 1000 priv->kms = &mdp5_kms->base.base;
934 1001
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
index 9de471191eba..8bdb7ee4983b 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
@@ -23,8 +23,9 @@
23#include "mdp/mdp_kms.h" 23#include "mdp/mdp_kms.h"
24#include "mdp5_cfg.h" /* must be included before mdp5.xml.h */ 24#include "mdp5_cfg.h" /* must be included before mdp5.xml.h */
25#include "mdp5.xml.h" 25#include "mdp5.xml.h"
26#include "mdp5_ctl.h"
27#include "mdp5_pipe.h" 26#include "mdp5_pipe.h"
27#include "mdp5_mixer.h"
28#include "mdp5_ctl.h"
28#include "mdp5_smp.h" 29#include "mdp5_smp.h"
29 30
30struct mdp5_state; 31struct mdp5_state;
@@ -39,6 +40,12 @@ struct mdp5_kms {
39 unsigned num_hwpipes; 40 unsigned num_hwpipes;
40 struct mdp5_hw_pipe *hwpipes[SSPP_MAX]; 41 struct mdp5_hw_pipe *hwpipes[SSPP_MAX];
41 42
43 unsigned num_hwmixers;
44 struct mdp5_hw_mixer *hwmixers[8];
45
46 unsigned num_intfs;
47 struct mdp5_interface *intfs[5];
48
42 struct mdp5_cfg_handler *cfg; 49 struct mdp5_cfg_handler *cfg;
43 uint32_t caps; /* MDP capabilities (MDP_CAP_XXX bits) */ 50 uint32_t caps; /* MDP capabilities (MDP_CAP_XXX bits) */
44 51
@@ -83,6 +90,7 @@ struct mdp5_kms {
83 */ 90 */
84struct mdp5_state { 91struct mdp5_state {
85 struct mdp5_hw_pipe_state hwpipe; 92 struct mdp5_hw_pipe_state hwpipe;
93 struct mdp5_hw_mixer_state hwmixer;
86 struct mdp5_smp_state smp; 94 struct mdp5_smp_state smp;
87}; 95};
88 96
@@ -96,6 +104,7 @@ struct mdp5_plane_state {
96 struct drm_plane_state base; 104 struct drm_plane_state base;
97 105
98 struct mdp5_hw_pipe *hwpipe; 106 struct mdp5_hw_pipe *hwpipe;
107 struct mdp5_hw_pipe *r_hwpipe; /* right hwpipe */
99 108
100 /* aligned with property */ 109 /* aligned with property */
101 uint8_t premultiplied; 110 uint8_t premultiplied;
@@ -108,6 +117,28 @@ struct mdp5_plane_state {
108#define to_mdp5_plane_state(x) \ 117#define to_mdp5_plane_state(x) \
109 container_of(x, struct mdp5_plane_state, base) 118 container_of(x, struct mdp5_plane_state, base)
110 119
120struct mdp5_pipeline {
121 struct mdp5_interface *intf;
122 struct mdp5_hw_mixer *mixer;
123 struct mdp5_hw_mixer *r_mixer; /* right mixer */
124};
125
126struct mdp5_crtc_state {
127 struct drm_crtc_state base;
128
129 struct mdp5_ctl *ctl;
130 struct mdp5_pipeline pipeline;
131
132 /* these are derivatives of intf/mixer state in mdp5_pipeline */
133 u32 vblank_irqmask;
134 u32 err_irqmask;
135 u32 pp_done_irqmask;
136
137 bool cmd_mode;
138};
139#define to_mdp5_crtc_state(x) \
140 container_of(x, struct mdp5_crtc_state, base)
141
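The irq masks above are described as derivatives of the pipeline; a plausible sketch of that derivation, using the helpers defined later in this header (the actual call site is in the CRTC code, which this hunk does not show):

	/* Sketch -- assumed derivation via intf2vblank()/lm2ppdone()/intf2err():
	 *   mdp5_cstate->vblank_irqmask  = intf2vblank(pipeline.mixer, pipeline.intf);
	 *   mdp5_cstate->pp_done_irqmask = lm2ppdone(pipeline.mixer);
	 *   mdp5_cstate->err_irqmask     = intf2err(pipeline.intf->num);
	 */
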
111enum mdp5_intf_mode { 142enum mdp5_intf_mode {
112 MDP5_INTF_MODE_NONE = 0, 143 MDP5_INTF_MODE_NONE = 0,
113 144
@@ -121,6 +152,7 @@ enum mdp5_intf_mode {
121}; 152};
122 153
123struct mdp5_interface { 154struct mdp5_interface {
155 int idx;
124 int num; /* display interface number */ 156 int num; /* display interface number */
125 enum mdp5_intf_type type; 157 enum mdp5_intf_type type;
126 enum mdp5_intf_mode mode; 158 enum mdp5_intf_mode mode;
@@ -128,11 +160,11 @@ struct mdp5_interface {
128 160
129struct mdp5_encoder { 161struct mdp5_encoder {
130 struct drm_encoder base; 162 struct drm_encoder base;
131 struct mdp5_interface intf;
132 spinlock_t intf_lock; /* protect REG_MDP5_INTF_* registers */ 163 spinlock_t intf_lock; /* protect REG_MDP5_INTF_* registers */
133 bool enabled; 164 bool enabled;
134 uint32_t bsc; 165 uint32_t bsc;
135 166
167 struct mdp5_interface *intf;
136 struct mdp5_ctl *ctl; 168 struct mdp5_ctl *ctl;
137}; 169};
138#define to_mdp5_encoder(x) container_of(x, struct mdp5_encoder, base) 170#define to_mdp5_encoder(x) container_of(x, struct mdp5_encoder, base)
@@ -197,8 +229,8 @@ static inline uint32_t intf2err(int intf_num)
197 } 229 }
198} 230}
199 231
200#define GET_PING_PONG_ID(layer_mixer) ((layer_mixer == 5) ? 3 : layer_mixer) 232static inline uint32_t intf2vblank(struct mdp5_hw_mixer *mixer,
201static inline uint32_t intf2vblank(int lm, struct mdp5_interface *intf) 233 struct mdp5_interface *intf)
202{ 234{
203 /* 235 /*
204 * In case of DSI Command Mode, the Ping Pong's read pointer IRQ 236 * In case of DSI Command Mode, the Ping Pong's read pointer IRQ
@@ -208,7 +240,7 @@ static inline uint32_t intf2vblank(int lm, struct mdp5_interface *intf)
208 240
209 if ((intf->type == INTF_DSI) && 241 if ((intf->type == INTF_DSI) &&
210 (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)) 242 (intf->mode == MDP5_INTF_DSI_MODE_COMMAND))
211 return MDP5_IRQ_PING_PONG_0_RD_PTR << GET_PING_PONG_ID(lm); 243 return MDP5_IRQ_PING_PONG_0_RD_PTR << mixer->pp;
212 244
213 if (intf->type == INTF_WB) 245 if (intf->type == INTF_WB)
214 return MDP5_IRQ_WB_2_DONE; 246 return MDP5_IRQ_WB_2_DONE;
@@ -222,9 +254,9 @@ static inline uint32_t intf2vblank(int lm, struct mdp5_interface *intf)
222 } 254 }
223} 255}
224 256
225static inline uint32_t lm2ppdone(int lm) 257static inline uint32_t lm2ppdone(struct mdp5_hw_mixer *mixer)
226{ 258{
227 return MDP5_IRQ_PING_PONG_0_DONE << GET_PING_PONG_ID(lm); 259 return MDP5_IRQ_PING_PONG_0_DONE << mixer->pp;
228} 260}
229 261
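For reference, the removed GET_PING_PONG_ID() macro hard-coded the LM-to-PingPong mapping; mixer->pp now carries the same information from the per-SoC config:

	/* Old hard-coded mapping (GET_PING_PONG_ID): LM0..LM3 -> PP0..PP3,
	 * LM5 -> PP3.  With mixer->pp the mapping comes from the per-SoC
	 * mdp5_lm_instance config instead, e.g. (hypothetical entries):
	 *   { .id = 0, .pp = 0 }, { .id = 1, .pp = 1 }, { .id = 5, .pp = 3 }
	 */
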
230int mdp5_disable(struct mdp5_kms *mdp5_kms); 262int mdp5_disable(struct mdp5_kms *mdp5_kms);
@@ -243,15 +275,16 @@ void mdp5_irq_domain_fini(struct mdp5_kms *mdp5_kms);
243 275
244uint32_t mdp5_plane_get_flush(struct drm_plane *plane); 276uint32_t mdp5_plane_get_flush(struct drm_plane *plane);
245enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane); 277enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane);
278enum mdp5_pipe mdp5_plane_right_pipe(struct drm_plane *plane);
246struct drm_plane *mdp5_plane_init(struct drm_device *dev, 279struct drm_plane *mdp5_plane_init(struct drm_device *dev,
247 enum drm_plane_type type); 280 enum drm_plane_type type);
248 281
249struct mdp5_ctl *mdp5_crtc_get_ctl(struct drm_crtc *crtc); 282struct mdp5_ctl *mdp5_crtc_get_ctl(struct drm_crtc *crtc);
250uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc); 283uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc);
251 284
252int mdp5_crtc_get_lm(struct drm_crtc *crtc); 285struct mdp5_hw_mixer *mdp5_crtc_get_mixer(struct drm_crtc *crtc);
253void mdp5_crtc_set_pipeline(struct drm_crtc *crtc, 286struct mdp5_pipeline *mdp5_crtc_get_pipeline(struct drm_crtc *crtc);
254 struct mdp5_interface *intf, struct mdp5_ctl *ctl); 287void mdp5_crtc_set_pipeline(struct drm_crtc *crtc);
255void mdp5_crtc_wait_for_commit_done(struct drm_crtc *crtc); 288void mdp5_crtc_wait_for_commit_done(struct drm_crtc *crtc);
256struct drm_crtc *mdp5_crtc_init(struct drm_device *dev, 289struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
257 struct drm_plane *plane, 290 struct drm_plane *plane,
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_mixer.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_mixer.c
new file mode 100644
index 000000000000..8a00991f03c7
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_mixer.c
@@ -0,0 +1,172 @@
1/*
2 * Copyright (C) 2017 The Linux Foundation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License version 2 as published by
6 * the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16
17#include "mdp5_kms.h"
18
19/*
20 * As of now, there are only 2 combinations possible for source split:
21 *
22 * Left | Right
23 * -----|------
24 * LM0 | LM1
25 * LM2 | LM5
26 *
27 */
28static int lm_right_pair[] = { 1, -1, 5, -1, -1, -1 };
29
30static int get_right_pair_idx(struct mdp5_kms *mdp5_kms, int lm)
31{
32 int i;
33 int pair_lm;
34
35 pair_lm = lm_right_pair[lm];
36 if (pair_lm < 0)
37 return -EINVAL;
38
39 for (i = 0; i < mdp5_kms->num_hwmixers; i++) {
40 struct mdp5_hw_mixer *mixer = mdp5_kms->hwmixers[i];
41
42 if (mixer->lm == pair_lm)
43 return mixer->idx;
44 }
45
46 return -1;
47}
48
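A worked example of the pairing lookup, assuming a hypothetical hwmixers[] ordering:

	/* Hypothetical: hwmixers[] = { LM0, LM1, LM2, LM5 } (idx 0..3).
	 * get_right_pair_idx(mdp5_kms, 2) reads lm_right_pair[2] == 5, scans
	 * hwmixers[] for ->lm == 5 and returns idx 3.  For LM1 (itself a
	 * right-hand LM) lm_right_pair[1] == -1, so -EINVAL is returned.
	 */
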
49int mdp5_mixer_assign(struct drm_atomic_state *s, struct drm_crtc *crtc,
50 uint32_t caps, struct mdp5_hw_mixer **mixer,
51 struct mdp5_hw_mixer **r_mixer)
52{
53 struct msm_drm_private *priv = s->dev->dev_private;
54 struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms));
55 struct mdp5_state *state = mdp5_get_state(s);
56 struct mdp5_hw_mixer_state *new_state;
57 int i;
58
59 if (IS_ERR(state))
60 return PTR_ERR(state);
61
62 new_state = &state->hwmixer;
63
64 for (i = 0; i < mdp5_kms->num_hwmixers; i++) {
65 struct mdp5_hw_mixer *cur = mdp5_kms->hwmixers[i];
66
67 /*
68 * skip if already in-use by a different CRTC. If there is a
69 * mixer already assigned to this CRTC, it means this call is
70 * a request to get an additional right mixer. Assume that the
71 * existing mixer is the 'left' one, and try to see if we can
72 * get its corresponding 'right' pair.
73 */
74 if (new_state->hwmixer_to_crtc[cur->idx] &&
75 new_state->hwmixer_to_crtc[cur->idx] != crtc)
76 continue;
77
78 /* skip if doesn't support some required caps: */
79 if (caps & ~cur->caps)
80 continue;
81
82 if (r_mixer) {
83 int pair_idx;
84
85 pair_idx = get_right_pair_idx(mdp5_kms, cur->lm);
86 if (pair_idx < 0)
87 return -EINVAL;
88
89 if (new_state->hwmixer_to_crtc[pair_idx])
90 continue;
91
92 *r_mixer = mdp5_kms->hwmixers[pair_idx];
93 }
94
95 /*
96 * prefer a pair-able LM over an unpairable one. We can
97 * switch the CRTC from Normal mode to Source Split mode
98 * without requiring a full modeset if we had already
99 * assigned this CRTC a pair-able LM.
100 *
101 * TODO: There will be assignment sequences which would
102 * result in the CRTC requiring a full modeset, even
103 * if we have the LM resources to prevent it. For a platform
104 * with a few displays, we don't run out of pair-able LMs
105 * so easily. For now, ignore the possibility of requiring
106 * a full modeset.
107 */
108 if (!(*mixer) || cur->caps & MDP_LM_CAP_PAIR)
109 *mixer = cur;
110 }
111
112 if (!(*mixer))
113 return -ENOMEM;
114
115 if (r_mixer && !(*r_mixer))
116 return -ENOMEM;
117
118 DBG("assigning Layer Mixer %d to crtc %s", (*mixer)->lm, crtc->name);
119
120 new_state->hwmixer_to_crtc[(*mixer)->idx] = crtc;
121 if (r_mixer) {
122 DBG("assigning Right Layer Mixer %d to crtc %s", (*r_mixer)->lm,
123 crtc->name);
124 new_state->hwmixer_to_crtc[(*r_mixer)->idx] = crtc;
125 }
126
127 return 0;
128}
129
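A hedged caller sketch showing how a CRTC's atomic check might request mixers for the two cases (variable names are illustrative; the real caller lives in the CRTC code, not in this patch):

	/* Sketch: 's' is the drm_atomic_state, 'crtc' the CRTC being checked.
	 * *mixer must start out NULL, since mdp5_mixer_assign() tests it.
	 */
	struct mdp5_hw_mixer *mixer = NULL, *r_mixer = NULL;
	int ret;

	/* normal mode: a single LM is enough */
	ret = mdp5_mixer_assign(s, crtc, MDP_LM_CAP_DISPLAY, &mixer, NULL);

	/* source split: also grab the right-hand partner LM */
	ret = mdp5_mixer_assign(s, crtc, MDP_LM_CAP_DISPLAY | MDP_LM_CAP_PAIR,
				&mixer, &r_mixer);
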
130void mdp5_mixer_release(struct drm_atomic_state *s, struct mdp5_hw_mixer *mixer)
131{
132 struct mdp5_state *state = mdp5_get_state(s);
133 struct mdp5_hw_mixer_state *new_state = &state->hwmixer;
134
135 if (!mixer)
136 return;
137
138 if (WARN_ON(!new_state->hwmixer_to_crtc[mixer->idx]))
139 return;
140
141 DBG("%s: release from crtc %s", mixer->name,
142 new_state->hwmixer_to_crtc[mixer->idx]->name);
143
144 new_state->hwmixer_to_crtc[mixer->idx] = NULL;
145}
146
147void mdp5_mixer_destroy(struct mdp5_hw_mixer *mixer)
148{
149 kfree(mixer);
150}
151
152static const char * const mixer_names[] = {
153 "LM0", "LM1", "LM2", "LM3", "LM4", "LM5",
154};
155
156struct mdp5_hw_mixer *mdp5_mixer_init(const struct mdp5_lm_instance *lm)
157{
158 struct mdp5_hw_mixer *mixer;
159
160 mixer = kzalloc(sizeof(*mixer), GFP_KERNEL);
161 if (!mixer)
162 return ERR_PTR(-ENOMEM);
163
164 mixer->name = mixer_names[lm->id];
165 mixer->lm = lm->id;
166 mixer->caps = lm->caps;
167 mixer->pp = lm->pp;
168 mixer->dspp = lm->dspp;
169 mixer->flush_mask = mdp_ctl_flush_mask_lm(lm->id);
170
171 return mixer;
172}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_mixer.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_mixer.h
new file mode 100644
index 000000000000..9be94f567fbd
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_mixer.h
@@ -0,0 +1,47 @@
1/*
2 * Copyright (C) 2017 The Linux Foundation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License version 2 as published by
6 * the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16
17#ifndef __MDP5_LM_H__
18#define __MDP5_LM_H__
19
20/* represents a hw Layer Mixer, one (or more) is dynamically assigned to a crtc */
21struct mdp5_hw_mixer {
22 int idx;
23
24 const char *name;
25
26 int lm; /* the LM instance # */
27 uint32_t caps;
28 int pp;
29 int dspp;
30
31 uint32_t flush_mask; /* used to commit LM registers */
32};
33
34/* global atomic state of assignment between CRTCs and Layer Mixers: */
35struct mdp5_hw_mixer_state {
36 struct drm_crtc *hwmixer_to_crtc[8];
37};
38
39struct mdp5_hw_mixer *mdp5_mixer_init(const struct mdp5_lm_instance *lm);
40void mdp5_mixer_destroy(struct mdp5_hw_mixer *lm);
41int mdp5_mixer_assign(struct drm_atomic_state *s, struct drm_crtc *crtc,
42 uint32_t caps, struct mdp5_hw_mixer **mixer,
43 struct mdp5_hw_mixer **r_mixer);
44void mdp5_mixer_release(struct drm_atomic_state *s,
45 struct mdp5_hw_mixer *mixer);
46
47#endif /* __MDP5_LM_H__ */
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.c
index 35c4dabb0c0c..2bfac3712685 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.c
@@ -135,7 +135,5 @@ struct mdp5_hw_pipe *mdp5_pipe_init(enum mdp5_pipe pipe,
135 hwpipe->caps = caps; 135 hwpipe->caps = caps;
136 hwpipe->flush_mask = mdp_ctl_flush_mask_pipe(pipe); 136 hwpipe->flush_mask = mdp_ctl_flush_mask_pipe(pipe);
137 137
138 spin_lock_init(&hwpipe->pipe_lock);
139
140 return hwpipe; 138 return hwpipe;
141} 139}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.h
index 611da7a660c9..924c3e6f9517 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.h
@@ -18,7 +18,8 @@
18#ifndef __MDP5_PIPE_H__ 18#ifndef __MDP5_PIPE_H__
19#define __MDP5_PIPE_H__ 19#define __MDP5_PIPE_H__
20 20
21#define SSPP_MAX (SSPP_RGB3 + 1) /* TODO: Add SSPP_MAX in mdp5.xml.h */ 21/* TODO: Add SSPP_MAX in mdp5.xml.h */
22#define SSPP_MAX (SSPP_CURSOR1 + 1)
22 23
23/* represents a hw pipe, which is dynamically assigned to a plane */ 24/* represents a hw pipe, which is dynamically assigned to a plane */
24struct mdp5_hw_pipe { 25struct mdp5_hw_pipe {
@@ -27,7 +28,6 @@ struct mdp5_hw_pipe {
27 const char *name; 28 const char *name;
28 enum mdp5_pipe pipe; 29 enum mdp5_pipe pipe;
29 30
30 spinlock_t pipe_lock; /* protect REG_MDP5_PIPE_* registers */
31 uint32_t reg_offset; 31 uint32_t reg_offset;
32 uint32_t caps; 32 uint32_t caps;
33 33
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
index 60a5451ae0b9..a38c5fe6cc19 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
@@ -22,6 +22,8 @@
22struct mdp5_plane { 22struct mdp5_plane {
23 struct drm_plane base; 23 struct drm_plane base;
24 24
25 spinlock_t pipe_lock; /* protect REG_MDP5_PIPE_* registers */
26
25 uint32_t nformats; 27 uint32_t nformats;
26 uint32_t formats[32]; 28 uint32_t formats[32];
27}; 29};
@@ -40,9 +42,6 @@ static int mdp5_update_cursor_plane_legacy(struct drm_plane *plane,
40 uint32_t src_w, uint32_t src_h, 42 uint32_t src_w, uint32_t src_h,
41 struct drm_modeset_acquire_ctx *ctx); 43 struct drm_modeset_acquire_ctx *ctx);
42 44
43static void set_scanout_locked(struct drm_plane *plane,
44 struct drm_framebuffer *fb);
45
46static struct mdp5_kms *get_kms(struct drm_plane *plane) 45static struct mdp5_kms *get_kms(struct drm_plane *plane)
47{ 46{
48 struct msm_drm_private *priv = plane->dev->dev_private; 47 struct msm_drm_private *priv = plane->dev->dev_private;
@@ -178,9 +177,14 @@ mdp5_plane_atomic_print_state(struct drm_printer *p,
178 const struct drm_plane_state *state) 177 const struct drm_plane_state *state)
179{ 178{
180 struct mdp5_plane_state *pstate = to_mdp5_plane_state(state); 179 struct mdp5_plane_state *pstate = to_mdp5_plane_state(state);
180 struct mdp5_kms *mdp5_kms = get_kms(state->plane);
181 181
182 drm_printf(p, "\thwpipe=%s\n", pstate->hwpipe ? 182 drm_printf(p, "\thwpipe=%s\n", pstate->hwpipe ?
183 pstate->hwpipe->name : "(null)"); 183 pstate->hwpipe->name : "(null)");
184 if (mdp5_kms->caps & MDP_CAP_SRC_SPLIT)
185 drm_printf(p, "\tright-hwpipe=%s\n",
186 pstate->r_hwpipe ? pstate->r_hwpipe->name :
187 "(null)");
184 drm_printf(p, "\tpremultiplied=%u\n", pstate->premultiplied); 188 drm_printf(p, "\tpremultiplied=%u\n", pstate->premultiplied);
185 drm_printf(p, "\tzpos=%u\n", pstate->zpos); 189 drm_printf(p, "\tzpos=%u\n", pstate->zpos);
186 drm_printf(p, "\talpha=%u\n", pstate->alpha); 190 drm_printf(p, "\talpha=%u\n", pstate->alpha);
@@ -300,7 +304,9 @@ static int mdp5_plane_atomic_check_with_state(struct drm_crtc_state *crtc_state,
300 struct drm_plane_state *old_state = plane->state; 304 struct drm_plane_state *old_state = plane->state;
301 struct mdp5_cfg *config = mdp5_cfg_get_config(get_kms(plane)->cfg); 305 struct mdp5_cfg *config = mdp5_cfg_get_config(get_kms(plane)->cfg);
302 bool new_hwpipe = false; 306 bool new_hwpipe = false;
307 bool need_right_hwpipe = false;
303 uint32_t max_width, max_height; 308 uint32_t max_width, max_height;
309 bool out_of_bounds = false;
304 uint32_t caps = 0; 310 uint32_t caps = 0;
305 struct drm_rect clip; 311 struct drm_rect clip;
306 int min_scale, max_scale; 312 int min_scale, max_scale;
@@ -313,7 +319,23 @@ static int mdp5_plane_atomic_check_with_state(struct drm_crtc_state *crtc_state,
313 max_height = config->hw->lm.max_height << 16; 319 max_height = config->hw->lm.max_height << 16;
314 320
315 /* Make sure source dimensions are within bounds. */ 321 /* Make sure source dimensions are within bounds. */
316 if ((state->src_w > max_width) || (state->src_h > max_height)) { 322 if (state->src_h > max_height)
323 out_of_bounds = true;
324
325 if (state->src_w > max_width) {
326 /* If source split is supported, we can go up to 2x
327 * the max LM width, but we'd need to stage another
328 * hwpipe to the right LM. So, the drm_plane would
329 * consist of 2 hwpipes.
330 */
331 if (config->hw->mdp.caps & MDP_CAP_SRC_SPLIT &&
332 (state->src_w <= 2 * max_width))
333 need_right_hwpipe = true;
334 else
335 out_of_bounds = true;
336 }
337
338 if (out_of_bounds) {
317 struct drm_rect src = drm_plane_state_src(state); 339 struct drm_rect src = drm_plane_state_src(state);
318 DBG("Invalid source size "DRM_RECT_FP_FMT, 340 DBG("Invalid source size "DRM_RECT_FP_FMT,
319 DRM_RECT_FP_ARG(&src)); 341 DRM_RECT_FP_ARG(&src));
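To make the bounds logic concrete (numbers are hypothetical):

	/* Example: with config->hw->lm.max_width == 2048, max_width is
	 * 2048 << 16 in 16.16 fixed point.  A 3840-wide source fails the
	 * single-pipe check but passes src_w <= 2 * max_width when
	 * MDP_CAP_SRC_SPLIT is set, so need_right_hwpipe is set instead of
	 * out_of_bounds; a 4200-wide source is rejected either way.
	 */
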
@@ -364,6 +386,15 @@ static int mdp5_plane_atomic_check_with_state(struct drm_crtc_state *crtc_state,
364 if (!mdp5_state->hwpipe || (caps & ~mdp5_state->hwpipe->caps)) 386 if (!mdp5_state->hwpipe || (caps & ~mdp5_state->hwpipe->caps))
365 new_hwpipe = true; 387 new_hwpipe = true;
366 388
389 /*
 390 * (re)allocate hw pipe if we're either requesting 2 hw pipes
391 * or we're switching from 2 hw pipes to 1 hw pipe because the
392 * new src_w can be supported by 1 hw pipe itself.
393 */
394 if ((need_right_hwpipe && !mdp5_state->r_hwpipe) ||
395 (!need_right_hwpipe && mdp5_state->r_hwpipe))
396 new_hwpipe = true;
397
367 if (mdp5_kms->smp) { 398 if (mdp5_kms->smp) {
368 const struct mdp_format *format = 399 const struct mdp_format *format =
369 to_mdp_format(msm_framebuffer_format(state->fb)); 400 to_mdp_format(msm_framebuffer_format(state->fb));
@@ -382,13 +413,36 @@ static int mdp5_plane_atomic_check_with_state(struct drm_crtc_state *crtc_state,
382 * it available for other planes? 413 * it available for other planes?
383 */ 414 */
384 struct mdp5_hw_pipe *old_hwpipe = mdp5_state->hwpipe; 415 struct mdp5_hw_pipe *old_hwpipe = mdp5_state->hwpipe;
416 struct mdp5_hw_pipe *old_right_hwpipe =
417 mdp5_state->r_hwpipe;
418
385 mdp5_state->hwpipe = mdp5_pipe_assign(state->state, 419 mdp5_state->hwpipe = mdp5_pipe_assign(state->state,
386 plane, caps, blkcfg); 420 plane, caps, blkcfg);
387 if (IS_ERR(mdp5_state->hwpipe)) { 421 if (IS_ERR(mdp5_state->hwpipe)) {
388 DBG("%s: failed to assign hwpipe!", plane->name); 422 DBG("%s: failed to assign hwpipe!", plane->name);
389 return PTR_ERR(mdp5_state->hwpipe); 423 return PTR_ERR(mdp5_state->hwpipe);
390 } 424 }
425
426 if (need_right_hwpipe) {
427 mdp5_state->r_hwpipe =
428 mdp5_pipe_assign(state->state, plane,
429 caps, blkcfg);
430 if (IS_ERR(mdp5_state->r_hwpipe)) {
431 DBG("%s: failed to assign right hwpipe",
432 plane->name);
433 return PTR_ERR(mdp5_state->r_hwpipe);
434 }
435 } else {
436 /*
437 * set it to NULL so that the driver knows we
438 * don't have a right hwpipe when committing a
439 * new state
440 */
441 mdp5_state->r_hwpipe = NULL;
442 }
443
391 mdp5_pipe_release(state->state, old_hwpipe); 444 mdp5_pipe_release(state->state, old_hwpipe);
445 mdp5_pipe_release(state->state, old_right_hwpipe);
392 } 446 }
393 } 447 }
394 448
@@ -437,13 +491,10 @@ static const struct drm_plane_helper_funcs mdp5_plane_helper_funcs = {
437 .atomic_update = mdp5_plane_atomic_update, 491 .atomic_update = mdp5_plane_atomic_update,
438}; 492};
439 493
440static void set_scanout_locked(struct drm_plane *plane, 494static void set_scanout_locked(struct mdp5_kms *mdp5_kms,
441 struct drm_framebuffer *fb) 495 enum mdp5_pipe pipe,
496 struct drm_framebuffer *fb)
442{ 497{
443 struct mdp5_kms *mdp5_kms = get_kms(plane);
444 struct mdp5_hw_pipe *hwpipe = to_mdp5_plane_state(plane->state)->hwpipe;
445 enum mdp5_pipe pipe = hwpipe->pipe;
446
447 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_STRIDE_A(pipe), 498 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_STRIDE_A(pipe),
448 MDP5_PIPE_SRC_STRIDE_A_P0(fb->pitches[0]) | 499 MDP5_PIPE_SRC_STRIDE_A_P0(fb->pitches[0]) |
449 MDP5_PIPE_SRC_STRIDE_A_P1(fb->pitches[1])); 500 MDP5_PIPE_SRC_STRIDE_A_P1(fb->pitches[1]));
@@ -460,8 +511,6 @@ static void set_scanout_locked(struct drm_plane *plane,
460 msm_framebuffer_iova(fb, mdp5_kms->id, 2)); 511 msm_framebuffer_iova(fb, mdp5_kms->id, 2));
461 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC3_ADDR(pipe), 512 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC3_ADDR(pipe),
462 msm_framebuffer_iova(fb, mdp5_kms->id, 3)); 513 msm_framebuffer_iova(fb, mdp5_kms->id, 3));
463
464 plane->fb = fb;
465} 514}
466 515
467/* Note: mdp5_plane->pipe_lock must be locked */ 516/* Note: mdp5_plane->pipe_lock must be locked */
@@ -714,21 +763,129 @@ static void mdp5_write_pixel_ext(struct mdp5_kms *mdp5_kms, enum mdp5_pipe pipe,
714 } 763 }
715} 764}
716 765
766struct pixel_ext {
767 int left[COMP_MAX];
768 int right[COMP_MAX];
769 int top[COMP_MAX];
770 int bottom[COMP_MAX];
771};
772
773struct phase_step {
774 u32 x[COMP_MAX];
775 u32 y[COMP_MAX];
776};
777
778static void mdp5_hwpipe_mode_set(struct mdp5_kms *mdp5_kms,
779 struct mdp5_hw_pipe *hwpipe,
780 struct drm_framebuffer *fb,
781 struct phase_step *step,
782 struct pixel_ext *pe,
783 u32 scale_config, u32 hdecm, u32 vdecm,
784 bool hflip, bool vflip,
785 int crtc_x, int crtc_y,
786 unsigned int crtc_w, unsigned int crtc_h,
787 u32 src_img_w, u32 src_img_h,
788 u32 src_x, u32 src_y,
789 u32 src_w, u32 src_h)
790{
791 enum mdp5_pipe pipe = hwpipe->pipe;
792 bool has_pe = hwpipe->caps & MDP_PIPE_CAP_SW_PIX_EXT;
793 const struct mdp_format *format =
794 to_mdp_format(msm_framebuffer_format(fb));
795
796 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_IMG_SIZE(pipe),
797 MDP5_PIPE_SRC_IMG_SIZE_WIDTH(src_img_w) |
798 MDP5_PIPE_SRC_IMG_SIZE_HEIGHT(src_img_h));
799
800 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_SIZE(pipe),
801 MDP5_PIPE_SRC_SIZE_WIDTH(src_w) |
802 MDP5_PIPE_SRC_SIZE_HEIGHT(src_h));
803
804 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_XY(pipe),
805 MDP5_PIPE_SRC_XY_X(src_x) |
806 MDP5_PIPE_SRC_XY_Y(src_y));
807
808 mdp5_write(mdp5_kms, REG_MDP5_PIPE_OUT_SIZE(pipe),
809 MDP5_PIPE_OUT_SIZE_WIDTH(crtc_w) |
810 MDP5_PIPE_OUT_SIZE_HEIGHT(crtc_h));
811
812 mdp5_write(mdp5_kms, REG_MDP5_PIPE_OUT_XY(pipe),
813 MDP5_PIPE_OUT_XY_X(crtc_x) |
814 MDP5_PIPE_OUT_XY_Y(crtc_y));
815
816 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_FORMAT(pipe),
817 MDP5_PIPE_SRC_FORMAT_A_BPC(format->bpc_a) |
818 MDP5_PIPE_SRC_FORMAT_R_BPC(format->bpc_r) |
819 MDP5_PIPE_SRC_FORMAT_G_BPC(format->bpc_g) |
820 MDP5_PIPE_SRC_FORMAT_B_BPC(format->bpc_b) |
821 COND(format->alpha_enable, MDP5_PIPE_SRC_FORMAT_ALPHA_ENABLE) |
822 MDP5_PIPE_SRC_FORMAT_CPP(format->cpp - 1) |
823 MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT(format->unpack_count - 1) |
824 COND(format->unpack_tight, MDP5_PIPE_SRC_FORMAT_UNPACK_TIGHT) |
825 MDP5_PIPE_SRC_FORMAT_FETCH_TYPE(format->fetch_type) |
826 MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP(format->chroma_sample));
827
828 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_UNPACK(pipe),
829 MDP5_PIPE_SRC_UNPACK_ELEM0(format->unpack[0]) |
830 MDP5_PIPE_SRC_UNPACK_ELEM1(format->unpack[1]) |
831 MDP5_PIPE_SRC_UNPACK_ELEM2(format->unpack[2]) |
832 MDP5_PIPE_SRC_UNPACK_ELEM3(format->unpack[3]));
833
834 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_OP_MODE(pipe),
835 (hflip ? MDP5_PIPE_SRC_OP_MODE_FLIP_LR : 0) |
836 (vflip ? MDP5_PIPE_SRC_OP_MODE_FLIP_UD : 0) |
837 COND(has_pe, MDP5_PIPE_SRC_OP_MODE_SW_PIX_EXT_OVERRIDE) |
838 MDP5_PIPE_SRC_OP_MODE_BWC(BWC_LOSSLESS));
839
840 /* not using secure mode: */
841 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_ADDR_SW_STATUS(pipe), 0);
842
843 if (hwpipe->caps & MDP_PIPE_CAP_SW_PIX_EXT)
844 mdp5_write_pixel_ext(mdp5_kms, pipe, format,
845 src_w, pe->left, pe->right,
846 src_h, pe->top, pe->bottom);
847
848 if (hwpipe->caps & MDP_PIPE_CAP_SCALE) {
849 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_X(pipe),
850 step->x[COMP_0]);
851 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_Y(pipe),
852 step->y[COMP_0]);
853 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CR_PHASE_STEP_X(pipe),
854 step->x[COMP_1_2]);
855 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CR_PHASE_STEP_Y(pipe),
856 step->y[COMP_1_2]);
857 mdp5_write(mdp5_kms, REG_MDP5_PIPE_DECIMATION(pipe),
858 MDP5_PIPE_DECIMATION_VERT(vdecm) |
859 MDP5_PIPE_DECIMATION_HORZ(hdecm));
860 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CONFIG(pipe),
861 scale_config);
862 }
863
864 if (hwpipe->caps & MDP_PIPE_CAP_CSC) {
865 if (MDP_FORMAT_IS_YUV(format))
866 csc_enable(mdp5_kms, pipe,
867 mdp_get_default_csc_cfg(CSC_YUV2RGB));
868 else
869 csc_disable(mdp5_kms, pipe);
870 }
871
872 set_scanout_locked(mdp5_kms, pipe, fb);
873}
717 874
718static int mdp5_plane_mode_set(struct drm_plane *plane, 875static int mdp5_plane_mode_set(struct drm_plane *plane,
719 struct drm_crtc *crtc, struct drm_framebuffer *fb, 876 struct drm_crtc *crtc, struct drm_framebuffer *fb,
720 struct drm_rect *src, struct drm_rect *dest) 877 struct drm_rect *src, struct drm_rect *dest)
721{ 878{
879 struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
722 struct drm_plane_state *pstate = plane->state; 880 struct drm_plane_state *pstate = plane->state;
723 struct mdp5_hw_pipe *hwpipe = to_mdp5_plane_state(pstate)->hwpipe; 881 struct mdp5_hw_pipe *hwpipe = to_mdp5_plane_state(pstate)->hwpipe;
724 struct mdp5_kms *mdp5_kms = get_kms(plane); 882 struct mdp5_kms *mdp5_kms = get_kms(plane);
725 enum mdp5_pipe pipe = hwpipe->pipe; 883 enum mdp5_pipe pipe = hwpipe->pipe;
884 struct mdp5_hw_pipe *right_hwpipe;
726 const struct mdp_format *format; 885 const struct mdp_format *format;
727 uint32_t nplanes, config = 0; 886 uint32_t nplanes, config = 0;
728 uint32_t phasex_step[COMP_MAX] = {0,}, phasey_step[COMP_MAX] = {0,}; 887 struct phase_step step = { 0 };
729 bool pe = hwpipe->caps & MDP_PIPE_CAP_SW_PIX_EXT; 888 struct pixel_ext pe = { 0 };
730 int pe_left[COMP_MAX], pe_right[COMP_MAX];
731 int pe_top[COMP_MAX], pe_bottom[COMP_MAX];
732 uint32_t hdecm = 0, vdecm = 0; 889 uint32_t hdecm = 0, vdecm = 0;
733 uint32_t pix_format; 890 uint32_t pix_format;
734 unsigned int rotation; 891 unsigned int rotation;
@@ -737,6 +894,9 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
737 unsigned int crtc_w, crtc_h; 894 unsigned int crtc_w, crtc_h;
738 uint32_t src_x, src_y; 895 uint32_t src_x, src_y;
739 uint32_t src_w, src_h; 896 uint32_t src_w, src_h;
897 uint32_t src_img_w, src_img_h;
898 uint32_t src_x_r;
899 int crtc_x_r;
740 unsigned long flags; 900 unsigned long flags;
741 int ret; 901 int ret;
742 902
@@ -765,23 +925,41 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
765 src_w = src_w >> 16; 925 src_w = src_w >> 16;
766 src_h = src_h >> 16; 926 src_h = src_h >> 16;
767 927
928 src_img_w = min(fb->width, src_w);
929 src_img_h = min(fb->height, src_h);
930
768 DBG("%s: FB[%u] %u,%u,%u,%u -> CRTC[%u] %d,%d,%u,%u", plane->name, 931 DBG("%s: FB[%u] %u,%u,%u,%u -> CRTC[%u] %d,%d,%u,%u", plane->name,
769 fb->base.id, src_x, src_y, src_w, src_h, 932 fb->base.id, src_x, src_y, src_w, src_h,
770 crtc->base.id, crtc_x, crtc_y, crtc_w, crtc_h); 933 crtc->base.id, crtc_x, crtc_y, crtc_w, crtc_h);
771 934
772 ret = calc_scalex_steps(plane, pix_format, src_w, crtc_w, phasex_step); 935 right_hwpipe = to_mdp5_plane_state(pstate)->r_hwpipe;
936 if (right_hwpipe) {
937 /*
 938 * if the plane comprises 2 hw pipes, assume that the width
 939 * is split equally across them. The only parameters that vary
940 * between the 2 pipes are src_x and crtc_x
941 */
942 crtc_w /= 2;
943 src_w /= 2;
944 src_img_w /= 2;
945
946 crtc_x_r = crtc_x + crtc_w;
947 src_x_r = src_x + src_w;
948 }
949
950 ret = calc_scalex_steps(plane, pix_format, src_w, crtc_w, step.x);
773 if (ret) 951 if (ret)
774 return ret; 952 return ret;
775 953
776 ret = calc_scaley_steps(plane, pix_format, src_h, crtc_h, phasey_step); 954 ret = calc_scaley_steps(plane, pix_format, src_h, crtc_h, step.y);
777 if (ret) 955 if (ret)
778 return ret; 956 return ret;
779 957
780 if (hwpipe->caps & MDP_PIPE_CAP_SW_PIX_EXT) { 958 if (hwpipe->caps & MDP_PIPE_CAP_SW_PIX_EXT) {
781 calc_pixel_ext(format, src_w, crtc_w, phasex_step, 959 calc_pixel_ext(format, src_w, crtc_w, step.x,
782 pe_left, pe_right, true); 960 pe.left, pe.right, true);
783 calc_pixel_ext(format, src_h, crtc_h, phasey_step, 961 calc_pixel_ext(format, src_h, crtc_h, step.y,
784 pe_top, pe_bottom, false); 962 pe.top, pe.bottom, false);
785 } 963 }
786 964
787 /* TODO calc hdecm, vdecm */ 965 /* TODO calc hdecm, vdecm */
@@ -798,86 +976,23 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
798 hflip = !!(rotation & DRM_REFLECT_X); 976 hflip = !!(rotation & DRM_REFLECT_X);
799 vflip = !!(rotation & DRM_REFLECT_Y); 977 vflip = !!(rotation & DRM_REFLECT_Y);
800 978
801 spin_lock_irqsave(&hwpipe->pipe_lock, flags); 979 spin_lock_irqsave(&mdp5_plane->pipe_lock, flags);
802
803 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_IMG_SIZE(pipe),
804 MDP5_PIPE_SRC_IMG_SIZE_WIDTH(min(fb->width, src_w)) |
805 MDP5_PIPE_SRC_IMG_SIZE_HEIGHT(min(fb->height, src_h)));
806
807 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_SIZE(pipe),
808 MDP5_PIPE_SRC_SIZE_WIDTH(src_w) |
809 MDP5_PIPE_SRC_SIZE_HEIGHT(src_h));
810 980
811 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_XY(pipe), 981 mdp5_hwpipe_mode_set(mdp5_kms, hwpipe, fb, &step, &pe,
812 MDP5_PIPE_SRC_XY_X(src_x) | 982 config, hdecm, vdecm, hflip, vflip,
813 MDP5_PIPE_SRC_XY_Y(src_y)); 983 crtc_x, crtc_y, crtc_w, crtc_h,
984 src_img_w, src_img_h,
985 src_x, src_y, src_w, src_h);
986 if (right_hwpipe)
987 mdp5_hwpipe_mode_set(mdp5_kms, right_hwpipe, fb, &step, &pe,
988 config, hdecm, vdecm, hflip, vflip,
989 crtc_x_r, crtc_y, crtc_w, crtc_h,
990 src_img_w, src_img_h,
991 src_x_r, src_y, src_w, src_h);
814 992
815 mdp5_write(mdp5_kms, REG_MDP5_PIPE_OUT_SIZE(pipe), 993 spin_unlock_irqrestore(&mdp5_plane->pipe_lock, flags);
816 MDP5_PIPE_OUT_SIZE_WIDTH(crtc_w) |
817 MDP5_PIPE_OUT_SIZE_HEIGHT(crtc_h));
818 994
819 mdp5_write(mdp5_kms, REG_MDP5_PIPE_OUT_XY(pipe), 995 plane->fb = fb;
820 MDP5_PIPE_OUT_XY_X(crtc_x) |
821 MDP5_PIPE_OUT_XY_Y(crtc_y));
822
823 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_FORMAT(pipe),
824 MDP5_PIPE_SRC_FORMAT_A_BPC(format->bpc_a) |
825 MDP5_PIPE_SRC_FORMAT_R_BPC(format->bpc_r) |
826 MDP5_PIPE_SRC_FORMAT_G_BPC(format->bpc_g) |
827 MDP5_PIPE_SRC_FORMAT_B_BPC(format->bpc_b) |
828 COND(format->alpha_enable, MDP5_PIPE_SRC_FORMAT_ALPHA_ENABLE) |
829 MDP5_PIPE_SRC_FORMAT_CPP(format->cpp - 1) |
830 MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT(format->unpack_count - 1) |
831 COND(format->unpack_tight, MDP5_PIPE_SRC_FORMAT_UNPACK_TIGHT) |
832 MDP5_PIPE_SRC_FORMAT_FETCH_TYPE(format->fetch_type) |
833 MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP(format->chroma_sample));
834
835 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_UNPACK(pipe),
836 MDP5_PIPE_SRC_UNPACK_ELEM0(format->unpack[0]) |
837 MDP5_PIPE_SRC_UNPACK_ELEM1(format->unpack[1]) |
838 MDP5_PIPE_SRC_UNPACK_ELEM2(format->unpack[2]) |
839 MDP5_PIPE_SRC_UNPACK_ELEM3(format->unpack[3]));
840
841 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_OP_MODE(pipe),
842 (hflip ? MDP5_PIPE_SRC_OP_MODE_FLIP_LR : 0) |
843 (vflip ? MDP5_PIPE_SRC_OP_MODE_FLIP_UD : 0) |
844 COND(pe, MDP5_PIPE_SRC_OP_MODE_SW_PIX_EXT_OVERRIDE) |
845 MDP5_PIPE_SRC_OP_MODE_BWC(BWC_LOSSLESS));
846
847 /* not using secure mode: */
848 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_ADDR_SW_STATUS(pipe), 0);
849
850 if (hwpipe->caps & MDP_PIPE_CAP_SW_PIX_EXT)
851 mdp5_write_pixel_ext(mdp5_kms, pipe, format,
852 src_w, pe_left, pe_right,
853 src_h, pe_top, pe_bottom);
854
855 if (hwpipe->caps & MDP_PIPE_CAP_SCALE) {
856 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_X(pipe),
857 phasex_step[COMP_0]);
858 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_Y(pipe),
859 phasey_step[COMP_0]);
860 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CR_PHASE_STEP_X(pipe),
861 phasex_step[COMP_1_2]);
862 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CR_PHASE_STEP_Y(pipe),
863 phasey_step[COMP_1_2]);
864 mdp5_write(mdp5_kms, REG_MDP5_PIPE_DECIMATION(pipe),
865 MDP5_PIPE_DECIMATION_VERT(vdecm) |
866 MDP5_PIPE_DECIMATION_HORZ(hdecm));
867 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CONFIG(pipe), config);
868 }
869
870 if (hwpipe->caps & MDP_PIPE_CAP_CSC) {
871 if (MDP_FORMAT_IS_YUV(format))
872 csc_enable(mdp5_kms, pipe,
873 mdp_get_default_csc_cfg(CSC_YUV2RGB));
874 else
875 csc_disable(mdp5_kms, pipe);
876 }
877
878 set_scanout_locked(plane, fb);
879
880 spin_unlock_irqrestore(&hwpipe->pipe_lock, flags);
881 996
882 return ret; 997 return ret;
883} 998}
@@ -934,6 +1049,7 @@ static int mdp5_update_cursor_plane_legacy(struct drm_plane *plane,
934 1049
935 if (new_plane_state->visible) { 1050 if (new_plane_state->visible) {
936 struct mdp5_ctl *ctl; 1051 struct mdp5_ctl *ctl;
1052 struct mdp5_pipeline *pipeline = mdp5_crtc_get_pipeline(crtc);
937 1053
938 ret = mdp5_plane_mode_set(plane, crtc, fb, 1054 ret = mdp5_plane_mode_set(plane, crtc, fb,
939 &new_plane_state->src, 1055 &new_plane_state->src,
@@ -942,7 +1058,7 @@ static int mdp5_update_cursor_plane_legacy(struct drm_plane *plane,
942 1058
943 ctl = mdp5_crtc_get_ctl(crtc); 1059 ctl = mdp5_crtc_get_ctl(crtc);
944 1060
945 mdp5_ctl_commit(ctl, mdp5_plane_get_flush(plane)); 1061 mdp5_ctl_commit(ctl, pipeline, mdp5_plane_get_flush(plane));
946 } 1062 }
947 1063
948 *to_mdp5_plane_state(plane_state) = 1064 *to_mdp5_plane_state(plane_state) =
@@ -959,6 +1075,10 @@ slow:
959 src_x, src_y, src_w, src_h, ctx); 1075 src_x, src_y, src_w, src_h, ctx);
960} 1076}
961 1077
1078/*
1079 * Use this func and the one below only after the atomic state has been
1080 * successfully swapped
1081 */
962enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane) 1082enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane)
963{ 1083{
964 struct mdp5_plane_state *pstate = to_mdp5_plane_state(plane->state); 1084 struct mdp5_plane_state *pstate = to_mdp5_plane_state(plane->state);
@@ -969,14 +1089,30 @@ enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane)
969 return pstate->hwpipe->pipe; 1089 return pstate->hwpipe->pipe;
970} 1090}
971 1091
1092enum mdp5_pipe mdp5_plane_right_pipe(struct drm_plane *plane)
1093{
1094 struct mdp5_plane_state *pstate = to_mdp5_plane_state(plane->state);
1095
1096 if (!pstate->r_hwpipe)
1097 return SSPP_NONE;
1098
1099 return pstate->r_hwpipe->pipe;
1100}
1101
972uint32_t mdp5_plane_get_flush(struct drm_plane *plane) 1102uint32_t mdp5_plane_get_flush(struct drm_plane *plane)
973{ 1103{
974 struct mdp5_plane_state *pstate = to_mdp5_plane_state(plane->state); 1104 struct mdp5_plane_state *pstate = to_mdp5_plane_state(plane->state);
1105 u32 mask;
975 1106
976 if (WARN_ON(!pstate->hwpipe)) 1107 if (WARN_ON(!pstate->hwpipe))
977 return 0; 1108 return 0;
978 1109
979 return pstate->hwpipe->flush_mask; 1110 mask = pstate->hwpipe->flush_mask;
1111
1112 if (pstate->r_hwpipe)
1113 mask |= pstate->r_hwpipe->flush_mask;
1114
1115 return mask;
980} 1116}
981 1117
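Example of the combined mask (pipe names hypothetical):

	/* If the plane holds hwpipes RGB0 and RGB1, the returned mask is
	 *   mdp_ctl_flush_mask_pipe(SSPP_RGB0) | mdp_ctl_flush_mask_pipe(SSPP_RGB1)
	 * so a single CTL flush covers both halves of a source-split plane.
	 */
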
982/* initialize plane */ 1118/* initialize plane */
@@ -998,6 +1134,8 @@ struct drm_plane *mdp5_plane_init(struct drm_device *dev,
998 mdp5_plane->nformats = mdp_get_formats(mdp5_plane->formats, 1134 mdp5_plane->nformats = mdp_get_formats(mdp5_plane->formats,
999 ARRAY_SIZE(mdp5_plane->formats), false); 1135 ARRAY_SIZE(mdp5_plane->formats), false);
1000 1136
1137 spin_lock_init(&mdp5_plane->pipe_lock);
1138
1001 if (type == DRM_PLANE_TYPE_CURSOR) 1139 if (type == DRM_PLANE_TYPE_CURSOR)
1002 ret = drm_universal_plane_init(dev, plane, 0xff, 1140 ret = drm_universal_plane_init(dev, plane, 0xff,
1003 &mdp5_cursor_plane_funcs, 1141 &mdp5_cursor_plane_funcs,
diff --git a/drivers/gpu/drm/msm/mdp/mdp_kms.h b/drivers/gpu/drm/msm/mdp/mdp_kms.h
index 7574cdfef418..1185487e7e5e 100644
--- a/drivers/gpu/drm/msm/mdp/mdp_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp_kms.h
@@ -104,6 +104,7 @@ const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format);
104#define MDP_CAP_SMP BIT(0) /* Shared Memory Pool */ 104#define MDP_CAP_SMP BIT(0) /* Shared Memory Pool */
105#define MDP_CAP_DSC BIT(1) /* VESA Display Stream Compression */ 105#define MDP_CAP_DSC BIT(1) /* VESA Display Stream Compression */
106#define MDP_CAP_CDM BIT(2) /* Chroma Down Module (HDMI 2.0 YUV) */ 106#define MDP_CAP_CDM BIT(2) /* Chroma Down Module (HDMI 2.0 YUV) */
107#define MDP_CAP_SRC_SPLIT BIT(3) /* Source Split of SSPPs */
107 108
108/* MDP pipe capabilities */ 109/* MDP pipe capabilities */
109#define MDP_PIPE_CAP_HFLIP BIT(0) 110#define MDP_PIPE_CAP_HFLIP BIT(0)
@@ -114,6 +115,11 @@ const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format);
114#define MDP_PIPE_CAP_SW_PIX_EXT BIT(5) 115#define MDP_PIPE_CAP_SW_PIX_EXT BIT(5)
115#define MDP_PIPE_CAP_CURSOR BIT(6) 116#define MDP_PIPE_CAP_CURSOR BIT(6)
116 117
118/* MDP layer mixer caps */
119#define MDP_LM_CAP_DISPLAY BIT(0)
120#define MDP_LM_CAP_WB BIT(1)
121#define MDP_LM_CAP_PAIR BIT(2)
122
117static inline bool pipe_supports_yuv(uint32_t pipe_caps) 123static inline bool pipe_supports_yuv(uint32_t pipe_caps)
118{ 124{
119 return (pipe_caps & MDP_PIPE_CAP_SCALE) && 125 return (pipe_caps & MDP_PIPE_CAP_SCALE) &&
diff --git a/drivers/gpu/drm/msm/msm_debugfs.c b/drivers/gpu/drm/msm/msm_debugfs.c
index 4f35d4eb85d0..1855182c76ce 100644
--- a/drivers/gpu/drm/msm/msm_debugfs.c
+++ b/drivers/gpu/drm/msm/msm_debugfs.c
@@ -28,7 +28,9 @@ static int msm_gpu_show(struct drm_device *dev, struct seq_file *m)
28 28
29 if (gpu) { 29 if (gpu) {
30 seq_printf(m, "%s Status:\n", gpu->name); 30 seq_printf(m, "%s Status:\n", gpu->name);
31 pm_runtime_get_sync(&gpu->pdev->dev);
31 gpu->funcs->show(gpu, m); 32 gpu->funcs->show(gpu, m);
33 pm_runtime_put_sync(&gpu->pdev->dev);
32 } 34 }
33 35
34 return 0; 36 return 0;
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 9208e67be453..87b5695d4034 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -55,14 +55,13 @@ int msm_register_address_space(struct drm_device *dev,
 		struct msm_gem_address_space *aspace)
 {
 	struct msm_drm_private *priv = dev->dev_private;
-	int idx = priv->num_aspaces++;
 
-	if (WARN_ON(idx >= ARRAY_SIZE(priv->aspace)))
+	if (WARN_ON(priv->num_aspaces >= ARRAY_SIZE(priv->aspace)))
 		return -EINVAL;
 
-	priv->aspace[idx] = aspace;
+	priv->aspace[priv->num_aspaces] = aspace;
 
-	return idx;
+	return priv->num_aspaces++;
 }
 
 #ifdef CONFIG_DRM_MSM_REGISTER_LOGGING
@@ -265,6 +264,8 @@ static int msm_drm_uninit(struct device *dev)
 
 	if (gpu) {
 		mutex_lock(&ddev->struct_mutex);
+		// XXX what do we do here?
+		//pm_runtime_enable(&pdev->dev);
 		gpu->funcs->pm_suspend(gpu);
 		mutex_unlock(&ddev->struct_mutex);
 		gpu->funcs->destroy(gpu);
@@ -539,7 +540,7 @@ static int msm_open(struct drm_device *dev, struct drm_file *file)
 	return 0;
 }
 
-static void msm_preclose(struct drm_device *dev, struct drm_file *file)
+static void msm_postclose(struct drm_device *dev, struct drm_file *file)
 {
 	struct msm_drm_private *priv = dev->dev_private;
 	struct msm_file_private *ctx = file->driver_priv;
@@ -812,7 +813,7 @@ static struct drm_driver msm_driver = {
 				DRIVER_ATOMIC |
 				DRIVER_MODESET,
 	.open               = msm_open,
-	.preclose           = msm_preclose,
+	.postclose          = msm_postclose,
 	.lastclose          = msm_lastclose,
 	.irq_handler        = msm_irq,
 	.irq_preinstall     = msm_irq_preinstall,
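
The msm_register_address_space() fix above is a general defensive pattern: do not advance a counter (or otherwise publish state) until the validity check has passed, otherwise a failed call leaves the table corrupted for every later caller. A standalone sketch of the corrected shape, with hypothetical names:

#include <errno.h>
#include <stddef.h>

#define MAX_SLOTS 4

struct registry {
	void *slot[MAX_SLOTS];
	int count;
};

/* Returns the new index, or -EINVAL when the table is full. The count
 * is only incremented after the bounds check and the store succeed, so
 * a rejected registration leaves the registry untouched. */
static int registry_add(struct registry *r, void *item)
{
	if (r->count >= MAX_SLOTS)
		return -EINVAL;

	r->slot[r->count] = item;

	return r->count++;
}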
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index b885c3d5ae4d..28b6f9ba5066 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -191,7 +191,8 @@ void msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
 int msm_gem_map_vma(struct msm_gem_address_space *aspace,
 		struct msm_gem_vma *vma, struct sg_table *sgt, int npages);
 
-void msm_gem_address_space_destroy(struct msm_gem_address_space *aspace);
+void msm_gem_address_space_put(struct msm_gem_address_space *aspace);
+
 struct msm_gem_address_space *
 msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
 		const char *name);
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 59811f29607d..68e509b3b9e4 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -812,6 +812,12 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev,
 
 	size = PAGE_ALIGN(size);
 
+	/* Disallow zero sized objects as they make the underlying
+	 * infrastructure grumpy
+	 */
+	if (size == 0)
+		return ERR_PTR(-EINVAL);
+
 	ret = msm_gem_new_impl(dev, size, flags, NULL, &obj);
 	if (ret)
 		goto fail;
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index 7d529516b332..1b4cf20043ea 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -18,6 +18,7 @@
 #ifndef __MSM_GEM_H__
 #define __MSM_GEM_H__
 
+#include <linux/kref.h>
 #include <linux/reservation.h>
 #include "msm_drv.h"
 
@@ -31,6 +32,7 @@ struct msm_gem_address_space {
 	 */
 	struct drm_mm mm;
 	struct msm_mmu *mmu;
+	struct kref kref;
 };
 
 struct msm_gem_vma {
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 1172fe7a9252..1c545ebe6a5a 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -404,6 +404,24 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
 	if (MSM_PIPE_FLAGS(args->flags) & ~MSM_SUBMIT_FLAGS)
 		return -EINVAL;
 
+	if (args->flags & MSM_SUBMIT_FENCE_FD_IN) {
+		in_fence = sync_file_get_fence(args->fence_fd);
+
+		if (!in_fence)
+			return -EINVAL;
+
+		/* TODO if we get an array-fence due to userspace merging multiple
+		 * fences, we need a way to determine if all the backing fences
+		 * are from our own context..
+		 */
+
+		if (in_fence->context != gpu->fctx->context) {
+			ret = dma_fence_wait(in_fence, true);
+			if (ret)
+				return ret;
+		}
+	}
+
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
 	if (ret)
 		return ret;
@@ -431,27 +449,6 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
 	if (ret)
 		goto out;
 
-	if (args->flags & MSM_SUBMIT_FENCE_FD_IN) {
-		in_fence = sync_file_get_fence(args->fence_fd);
-
-		if (!in_fence) {
-			ret = -EINVAL;
-			goto out;
-		}
-
-		/* TODO if we get an array-fence due to userspace merging multiple
-		 * fences, we need a way to determine if all the backing fences
-		 * are from our own context..
-		 */
-
-		if (in_fence->context != gpu->fctx->context) {
-			ret = dma_fence_wait(in_fence, true);
-			if (ret)
-				goto out;
-		}
-
-	}
-
 	if (!(args->fence & MSM_SUBMIT_NO_IMPLICIT)) {
 		ret = submit_fence_sync(submit);
 		if (ret)
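
The submit-path change above moves the explicit-fence wait to before dev->struct_mutex is taken: a potentially long, interruptible sleep should not be performed while holding the lock every other submitter needs. A condensed sketch of the reordered flow, using the real dma-fence/sync_file calls around hypothetical surrounding code:

#include <linux/dma-fence.h>
#include <linux/mutex.h>
#include <linux/sync_file.h>

static int example_submit(struct mutex *big_lock, int fence_fd, u64 our_ctx)
{
	struct dma_fence *in_fence;
	int ret;

	in_fence = sync_file_get_fence(fence_fd);
	if (!in_fence)
		return -EINVAL;

	/* Block on the incoming fence *before* taking the lock, so other
	 * submitters are not stalled behind this wait. Fences from our own
	 * context will be ordered by the ring anyway. */
	if (in_fence->context != our_ctx) {
		ret = dma_fence_wait(in_fence, true);
		if (ret)
			goto out;
	}

	ret = mutex_lock_interruptible(big_lock);
	if (ret)
		goto out;

	/* ... queue the submit ... */

	mutex_unlock(big_lock);
out:
	dma_fence_put(in_fence);
	return ret;
}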
diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c
index b654eca7636a..f285d7e210db 100644
--- a/drivers/gpu/drm/msm/msm_gem_vma.c
+++ b/drivers/gpu/drm/msm/msm_gem_vma.c
@@ -19,6 +19,25 @@
19#include "msm_gem.h" 19#include "msm_gem.h"
20#include "msm_mmu.h" 20#include "msm_mmu.h"
21 21
22static void
23msm_gem_address_space_destroy(struct kref *kref)
24{
25 struct msm_gem_address_space *aspace = container_of(kref,
26 struct msm_gem_address_space, kref);
27
28 drm_mm_takedown(&aspace->mm);
29 if (aspace->mmu)
30 aspace->mmu->funcs->destroy(aspace->mmu);
31 kfree(aspace);
32}
33
34
35void msm_gem_address_space_put(struct msm_gem_address_space *aspace)
36{
37 if (aspace)
38 kref_put(&aspace->kref, msm_gem_address_space_destroy);
39}
40
22void 41void
23msm_gem_unmap_vma(struct msm_gem_address_space *aspace, 42msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
24 struct msm_gem_vma *vma, struct sg_table *sgt) 43 struct msm_gem_vma *vma, struct sg_table *sgt)
@@ -34,6 +53,8 @@ msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
34 drm_mm_remove_node(&vma->node); 53 drm_mm_remove_node(&vma->node);
35 54
36 vma->iova = 0; 55 vma->iova = 0;
56
57 msm_gem_address_space_put(aspace);
37} 58}
38 59
39int 60int
@@ -57,16 +78,10 @@ msm_gem_map_vma(struct msm_gem_address_space *aspace,
57 size, IOMMU_READ | IOMMU_WRITE); 78 size, IOMMU_READ | IOMMU_WRITE);
58 } 79 }
59 80
60 return ret; 81 /* Get a reference to the aspace to keep it around */
61} 82 kref_get(&aspace->kref);
62 83
63void 84 return ret;
64msm_gem_address_space_destroy(struct msm_gem_address_space *aspace)
65{
66 drm_mm_takedown(&aspace->mm);
67 if (aspace->mmu)
68 aspace->mmu->funcs->destroy(aspace->mmu);
69 kfree(aspace);
70} 85}
71 86
72struct msm_gem_address_space * 87struct msm_gem_address_space *
@@ -85,5 +100,7 @@ msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
85 drm_mm_init(&aspace->mm, (domain->geometry.aperture_start >> PAGE_SHIFT), 100 drm_mm_init(&aspace->mm, (domain->geometry.aperture_start >> PAGE_SHIFT),
86 (domain->geometry.aperture_end >> PAGE_SHIFT) - 1); 101 (domain->geometry.aperture_end >> PAGE_SHIFT) - 1);
87 102
103 kref_init(&aspace->kref);
104
88 return aspace; 105 return aspace;
89} 106}
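
The address-space conversion above replaces a direct destroy() call with kref-based reference counting: each mapped VMA takes a reference, and the backing structure is only freed when the last user drops its reference. The core of that idiom, as a minimal sketch around a hypothetical object:

#include <linux/kref.h>
#include <linux/slab.h>

struct example_obj {
	struct kref kref;
	/* ... payload ... */
};

/* Release callback: runs exactly once, when the final reference drops. */
static void example_obj_release(struct kref *kref)
{
	struct example_obj *obj = container_of(kref, struct example_obj, kref);

	kfree(obj);
}

static struct example_obj *example_obj_create(void)
{
	struct example_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (obj)
		kref_init(&obj->kref);	/* refcount starts at 1 */
	return obj;
}

/* Every user pairs a get with a put. */
static void example_obj_get(struct example_obj *obj)
{
	kref_get(&obj->kref);
}

static void example_obj_put(struct example_obj *obj)
{
	if (obj)
		kref_put(&obj->kref, example_obj_release);
}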
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 99e05aacbee1..97b9c38c6b3f 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -93,18 +93,18 @@ static int enable_clk(struct msm_gpu *gpu)
 {
 	int i;
 
-	if (gpu->grp_clks[0] && gpu->fast_rate)
-		clk_set_rate(gpu->grp_clks[0], gpu->fast_rate);
+	if (gpu->core_clk && gpu->fast_rate)
+		clk_set_rate(gpu->core_clk, gpu->fast_rate);
 
 	/* Set the RBBM timer rate to 19.2Mhz */
-	if (gpu->grp_clks[2])
-		clk_set_rate(gpu->grp_clks[2], 19200000);
+	if (gpu->rbbmtimer_clk)
+		clk_set_rate(gpu->rbbmtimer_clk, 19200000);
 
-	for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i >= 0; i--)
+	for (i = gpu->nr_clocks - 1; i >= 0; i--)
 		if (gpu->grp_clks[i])
 			clk_prepare(gpu->grp_clks[i]);
 
-	for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i >= 0; i--)
+	for (i = gpu->nr_clocks - 1; i >= 0; i--)
 		if (gpu->grp_clks[i])
 			clk_enable(gpu->grp_clks[i]);
 
@@ -115,19 +115,24 @@ static int disable_clk(struct msm_gpu *gpu)
 {
 	int i;
 
-	for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i >= 0; i--)
+	for (i = gpu->nr_clocks - 1; i >= 0; i--)
 		if (gpu->grp_clks[i])
 			clk_disable(gpu->grp_clks[i]);
 
-	for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i >= 0; i--)
+	for (i = gpu->nr_clocks - 1; i >= 0; i--)
 		if (gpu->grp_clks[i])
 			clk_unprepare(gpu->grp_clks[i]);
 
-	if (gpu->grp_clks[0] && gpu->slow_rate)
-		clk_set_rate(gpu->grp_clks[0], gpu->slow_rate);
+	/*
+	 * Set the clock to a deliberately low rate. On older targets the clock
+	 * speed had to be non zero to avoid problems. On newer targets this
+	 * will be rounded down to zero anyway so it all works out.
+	 */
+	if (gpu->core_clk)
+		clk_set_rate(gpu->core_clk, 27000000);
 
-	if (gpu->grp_clks[2])
-		clk_set_rate(gpu->grp_clks[2], 0);
+	if (gpu->rbbmtimer_clk)
+		clk_set_rate(gpu->rbbmtimer_clk, 0);
 
 	return 0;
 }
@@ -152,18 +157,9 @@ static int disable_axi(struct msm_gpu *gpu)
 
 int msm_gpu_pm_resume(struct msm_gpu *gpu)
 {
-	struct drm_device *dev = gpu->dev;
 	int ret;
 
-	DBG("%s: active_cnt=%d", gpu->name, gpu->active_cnt);
-
-	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
-
-	if (gpu->active_cnt++ > 0)
-		return 0;
-
-	if (WARN_ON(gpu->active_cnt <= 0))
-		return -EINVAL;
+	DBG("%s", gpu->name);
 
 	ret = enable_pwrrail(gpu);
 	if (ret)
@@ -177,23 +173,16 @@ int msm_gpu_pm_resume(struct msm_gpu *gpu)
 	if (ret)
 		return ret;
 
+	gpu->needs_hw_init = true;
+
 	return 0;
 }
 
 int msm_gpu_pm_suspend(struct msm_gpu *gpu)
 {
-	struct drm_device *dev = gpu->dev;
 	int ret;
 
-	DBG("%s: active_cnt=%d", gpu->name, gpu->active_cnt);
-
-	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
-
-	if (--gpu->active_cnt > 0)
-		return 0;
-
-	if (WARN_ON(gpu->active_cnt < 0))
-		return -EINVAL;
+	DBG("%s", gpu->name);
 
 	ret = disable_axi(gpu);
 	if (ret)
@@ -210,53 +199,20 @@ int msm_gpu_pm_suspend(struct msm_gpu *gpu)
 	return 0;
 }
 
-/*
- * Inactivity detection (for suspend):
- */
-
-static void inactive_worker(struct work_struct *work)
+int msm_gpu_hw_init(struct msm_gpu *gpu)
 {
-	struct msm_gpu *gpu = container_of(work, struct msm_gpu, inactive_work);
-	struct drm_device *dev = gpu->dev;
-
-	if (gpu->inactive)
-		return;
-
-	DBG("%s: inactive!\n", gpu->name);
-	mutex_lock(&dev->struct_mutex);
-	if (!(msm_gpu_active(gpu) || gpu->inactive)) {
-		disable_axi(gpu);
-		disable_clk(gpu);
-		gpu->inactive = true;
-	}
-	mutex_unlock(&dev->struct_mutex);
-}
+	int ret;
 
-static void inactive_handler(unsigned long data)
-{
-	struct msm_gpu *gpu = (struct msm_gpu *)data;
-	struct msm_drm_private *priv = gpu->dev->dev_private;
+	if (!gpu->needs_hw_init)
+		return 0;
 
-	queue_work(priv->wq, &gpu->inactive_work);
-}
+	disable_irq(gpu->irq);
+	ret = gpu->funcs->hw_init(gpu);
+	if (!ret)
+		gpu->needs_hw_init = false;
+	enable_irq(gpu->irq);
 
-/* cancel inactive timer and make sure we are awake: */
-static void inactive_cancel(struct msm_gpu *gpu)
-{
-	DBG("%s", gpu->name);
-	del_timer(&gpu->inactive_timer);
-	if (gpu->inactive) {
-		enable_clk(gpu);
-		enable_axi(gpu);
-		gpu->inactive = false;
-	}
-}
-
-static void inactive_start(struct msm_gpu *gpu)
-{
-	DBG("%s", gpu->name);
-	mod_timer(&gpu->inactive_timer,
-			round_jiffies_up(jiffies + DRM_MSM_INACTIVE_JIFFIES));
+	return ret;
 }
 
 /*
@@ -296,8 +252,9 @@ static void recover_worker(struct work_struct *work)
 	/* retire completed submits, plus the one that hung: */
 	retire_submits(gpu);
 
-	inactive_cancel(gpu);
+	pm_runtime_get_sync(&gpu->pdev->dev);
 	gpu->funcs->recover(gpu);
+	pm_runtime_put_sync(&gpu->pdev->dev);
 
 	/* replay the remaining submits after the one that hung: */
 	list_for_each_entry(submit, &gpu->submit_list, node) {
@@ -400,6 +357,8 @@ void msm_gpu_perfcntr_start(struct msm_gpu *gpu)
 {
 	unsigned long flags;
 
+	pm_runtime_get_sync(&gpu->pdev->dev);
+
 	spin_lock_irqsave(&gpu->perf_lock, flags);
 	/* we could dynamically enable/disable perfcntr registers too.. */
 	gpu->last_sample.active = msm_gpu_active(gpu);
@@ -413,6 +372,7 @@ void msm_gpu_perfcntr_start(struct msm_gpu *gpu)
 void msm_gpu_perfcntr_stop(struct msm_gpu *gpu)
 {
 	gpu->perfcntr_active = false;
+	pm_runtime_put_sync(&gpu->pdev->dev);
 }
 
 /* returns -errno or # of cntrs sampled */
@@ -458,6 +418,8 @@ static void retire_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
 		drm_gem_object_unreference(&msm_obj->base);
 	}
 
+	pm_runtime_mark_last_busy(&gpu->pdev->dev);
+	pm_runtime_put_autosuspend(&gpu->pdev->dev);
 	msm_gem_submit_free(submit);
 }
 
@@ -492,9 +454,6 @@ static void retire_worker(struct work_struct *work)
 	mutex_lock(&dev->struct_mutex);
 	retire_submits(gpu);
 	mutex_unlock(&dev->struct_mutex);
-
-	if (!msm_gpu_active(gpu))
-		inactive_start(gpu);
 }
 
 /* call from irq handler to schedule work to retire bo's */
@@ -515,7 +474,9 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
 
 	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 
-	inactive_cancel(gpu);
+	pm_runtime_get_sync(&gpu->pdev->dev);
+
+	msm_gpu_hw_init(gpu);
 
 	list_add_tail(&submit->node, &gpu->submit_list);
 
@@ -559,16 +520,52 @@ static irqreturn_t irq_handler(int irq, void *data)
 	return gpu->funcs->irq(gpu);
 }
 
-static const char *clk_names[] = {
-		"core", "iface", "rbbmtimer", "mem", "mem_iface", "alt_mem_iface",
-};
+static struct clk *get_clock(struct device *dev, const char *name)
+{
+	struct clk *clk = devm_clk_get(dev, name);
+
+	return IS_ERR(clk) ? NULL : clk;
+}
+
+static int get_clocks(struct platform_device *pdev, struct msm_gpu *gpu)
+{
+	struct device *dev = &pdev->dev;
+	struct property *prop;
+	const char *name;
+	int i = 0;
+
+	gpu->nr_clocks = of_property_count_strings(dev->of_node, "clock-names");
+	if (gpu->nr_clocks < 1) {
+		gpu->nr_clocks = 0;
+		return 0;
+	}
+
+	gpu->grp_clks = devm_kcalloc(dev, sizeof(struct clk *), gpu->nr_clocks,
+		GFP_KERNEL);
+	if (!gpu->grp_clks)
+		return -ENOMEM;
+
+	of_property_for_each_string(dev->of_node, "clock-names", prop, name) {
+		gpu->grp_clks[i] = get_clock(dev, name);
+
+		/* Remember the key clocks that we need to control later */
+		if (!strcmp(name, "core"))
+			gpu->core_clk = gpu->grp_clks[i];
+		else if (!strcmp(name, "rbbmtimer"))
+			gpu->rbbmtimer_clk = gpu->grp_clks[i];
+
+		++i;
+	}
+
+	return 0;
+}
 
 int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 		struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
 		const char *name, const char *ioname, const char *irqname, int ringsz)
 {
 	struct iommu_domain *iommu;
-	int i, ret;
+	int ret;
 
 	if (WARN_ON(gpu->num_perfcntrs > ARRAY_SIZE(gpu->last_cntrs)))
 		gpu->num_perfcntrs = ARRAY_SIZE(gpu->last_cntrs);
@@ -576,7 +573,6 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 	gpu->dev = drm;
 	gpu->funcs = funcs;
 	gpu->name = name;
-	gpu->inactive = true;
 	gpu->fctx = msm_fence_context_alloc(drm, name);
 	if (IS_ERR(gpu->fctx)) {
 		ret = PTR_ERR(gpu->fctx);
@@ -586,19 +582,15 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 
 	INIT_LIST_HEAD(&gpu->active_list);
 	INIT_WORK(&gpu->retire_work, retire_worker);
-	INIT_WORK(&gpu->inactive_work, inactive_worker);
 	INIT_WORK(&gpu->recover_work, recover_worker);
 
 	INIT_LIST_HEAD(&gpu->submit_list);
 
-	setup_timer(&gpu->inactive_timer, inactive_handler,
-			(unsigned long)gpu);
 	setup_timer(&gpu->hangcheck_timer, hangcheck_handler,
 			(unsigned long)gpu);
 
 	spin_lock_init(&gpu->perf_lock);
 
-	BUG_ON(ARRAY_SIZE(clk_names) != ARRAY_SIZE(gpu->grp_clks));
 
 	/* Map registers: */
 	gpu->mmio = msm_ioremap(pdev, ioname, name);
@@ -622,13 +614,9 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 		goto fail;
 	}
 
-	/* Acquire clocks: */
-	for (i = 0; i < ARRAY_SIZE(clk_names); i++) {
-		gpu->grp_clks[i] = msm_clk_get(pdev, clk_names[i]);
-		DBG("grp_clks[%s]: %p", clk_names[i], gpu->grp_clks[i]);
-		if (IS_ERR(gpu->grp_clks[i]))
-			gpu->grp_clks[i] = NULL;
-	}
+	ret = get_clocks(pdev, gpu);
+	if (ret)
+		goto fail;
 
 	gpu->ebi1_clk = msm_clk_get(pdev, "bus");
 	DBG("ebi1_clk: %p", gpu->ebi1_clk);
@@ -684,6 +672,9 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 		goto fail;
 	}
 
+	gpu->pdev = pdev;
+	platform_set_drvdata(pdev, gpu);
+
 	bs_init(gpu);
 
 	return 0;
@@ -706,9 +697,6 @@ void msm_gpu_cleanup(struct msm_gpu *gpu)
 		msm_ringbuffer_destroy(gpu->rb);
 	}
 
-	if (gpu->aspace)
-		msm_gem_address_space_destroy(gpu->aspace);
-
 	if (gpu->fctx)
 		msm_fence_context_free(gpu->fctx);
 }
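
get_clocks() above replaces a fixed clock-name table with whatever the device tree actually declares. The general recipe: count the "clock-names" strings, allocate the array, then walk the property and look each clock up by name. A trimmed sketch of that recipe using the real OF/clk APIs around a hypothetical driver struct:

#include <linux/clk.h>
#include <linux/of.h>
#include <linux/platform_device.h>

struct example_gpu {
	struct clk **clks;
	int nr_clocks;
};

static int example_get_clocks(struct platform_device *pdev,
		struct example_gpu *gpu)
{
	struct device *dev = &pdev->dev;
	struct property *prop;
	const char *name;
	int i = 0;

	/* How many clocks does the DT node declare? */
	gpu->nr_clocks = of_property_count_strings(dev->of_node, "clock-names");
	if (gpu->nr_clocks < 1) {
		gpu->nr_clocks = 0;
		return 0;	/* clocks are optional here */
	}

	gpu->clks = devm_kcalloc(dev, gpu->nr_clocks, sizeof(*gpu->clks),
			GFP_KERNEL);
	if (!gpu->clks)
		return -ENOMEM;

	/* Look each named clock up; a missing clock becomes a NULL slot,
	 * which the enable/disable loops simply skip. */
	of_property_for_each_string(dev->of_node, "clock-names", prop, name) {
		struct clk *clk = devm_clk_get(dev, name);

		gpu->clks[i++] = IS_ERR(clk) ? NULL : clk;
	}

	return 0;
}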
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index c4c39d3272c7..aa3241000455 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -64,6 +64,7 @@ struct msm_gpu_funcs {
 struct msm_gpu {
 	const char *name;
 	struct drm_device *dev;
+	struct platform_device *pdev;
 	const struct msm_gpu_funcs *funcs;
 
 	/* performance counters (hw & sw): */
@@ -88,9 +89,8 @@ struct msm_gpu {
 	/* fencing: */
 	struct msm_fence_context *fctx;
 
-	/* is gpu powered/active? */
-	int active_cnt;
-	bool inactive;
+	/* does gpu need hw_init? */
+	bool needs_hw_init;
 
 	/* worker for handling active-list retiring: */
 	struct work_struct retire_work;
@@ -103,8 +103,10 @@ struct msm_gpu {
 
 	/* Power Control: */
 	struct regulator *gpu_reg, *gpu_cx;
-	struct clk *ebi1_clk, *grp_clks[6];
-	uint32_t fast_rate, slow_rate, bus_freq;
+	struct clk **grp_clks;
+	int nr_clocks;
+	struct clk *ebi1_clk, *core_clk, *rbbmtimer_clk;
+	uint32_t fast_rate, bus_freq;
 
 #ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
 	struct msm_bus_scale_pdata *bus_scale_table;
@@ -114,9 +116,7 @@ struct msm_gpu {
 	/* Hang and Inactivity Detection:
 	 */
 #define DRM_MSM_INACTIVE_PERIOD  66 /* in ms (roughly four frames) */
-#define DRM_MSM_INACTIVE_JIFFIES msecs_to_jiffies(DRM_MSM_INACTIVE_PERIOD)
-	struct timer_list inactive_timer;
-	struct work_struct inactive_work;
+
 #define DRM_MSM_HANGCHECK_PERIOD 500 /* in ms */
 #define DRM_MSM_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_MSM_HANGCHECK_PERIOD)
 	struct timer_list hangcheck_timer;
@@ -196,6 +196,8 @@ static inline void gpu_write64(struct msm_gpu *gpu, u32 lo, u32 hi, u64 val)
 int msm_gpu_pm_suspend(struct msm_gpu *gpu);
 int msm_gpu_pm_resume(struct msm_gpu *gpu);
 
+int msm_gpu_hw_init(struct msm_gpu *gpu);
+
 void msm_gpu_perfcntr_start(struct msm_gpu *gpu);
 void msm_gpu_perfcntr_stop(struct msm_gpu *gpu);
 int msm_gpu_perfcntr_sample(struct msm_gpu *gpu, uint32_t *activetime,
diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c
index 7f5779daf5c8..b23d33622f37 100644
--- a/drivers/gpu/drm/msm/msm_iommu.c
+++ b/drivers/gpu/drm/msm/msm_iommu.c
@@ -38,78 +38,47 @@ static int msm_iommu_attach(struct msm_mmu *mmu, const char * const *names,
 		int cnt)
 {
 	struct msm_iommu *iommu = to_msm_iommu(mmu);
-	return iommu_attach_device(iommu->domain, mmu->dev);
+	int ret;
+
+	pm_runtime_get_sync(mmu->dev);
+	ret = iommu_attach_device(iommu->domain, mmu->dev);
+	pm_runtime_put_sync(mmu->dev);
+
+	return ret;
 }
 
 static void msm_iommu_detach(struct msm_mmu *mmu, const char * const *names,
 		int cnt)
 {
 	struct msm_iommu *iommu = to_msm_iommu(mmu);
+
+	pm_runtime_get_sync(mmu->dev);
 	iommu_detach_device(iommu->domain, mmu->dev);
+	pm_runtime_put_sync(mmu->dev);
 }
 
 static int msm_iommu_map(struct msm_mmu *mmu, uint64_t iova,
 		struct sg_table *sgt, unsigned len, int prot)
 {
 	struct msm_iommu *iommu = to_msm_iommu(mmu);
-	struct iommu_domain *domain = iommu->domain;
-	struct scatterlist *sg;
-	unsigned long da = iova;
-	unsigned int i, j;
-	int ret;
-
-	if (!domain || !sgt)
-		return -EINVAL;
-
-	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
-		dma_addr_t pa = sg_phys(sg) - sg->offset;
-		size_t bytes = sg->length + sg->offset;
-
-		VERB("map[%d]: %08lx %08lx(%zx)", i, da, (unsigned long)pa, bytes);
+	size_t ret;
 
-		ret = iommu_map(domain, da, pa, bytes, prot);
-		if (ret)
-			goto fail;
+//	pm_runtime_get_sync(mmu->dev);
+	ret = iommu_map_sg(iommu->domain, iova, sgt->sgl, sgt->nents, prot);
+//	pm_runtime_put_sync(mmu->dev);
+	WARN_ON(ret < 0);
 
-		da += bytes;
-	}
-
-	return 0;
-
-fail:
-	da = iova;
-
-	for_each_sg(sgt->sgl, sg, i, j) {
-		size_t bytes = sg->length + sg->offset;
-		iommu_unmap(domain, da, bytes);
-		da += bytes;
-	}
-	return ret;
+	return (ret == len) ? 0 : -EINVAL;
 }
 
 static int msm_iommu_unmap(struct msm_mmu *mmu, uint64_t iova,
 		struct sg_table *sgt, unsigned len)
 {
 	struct msm_iommu *iommu = to_msm_iommu(mmu);
-	struct iommu_domain *domain = iommu->domain;
-	struct scatterlist *sg;
-	unsigned long da = iova;
-	int i;
-
-	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
-		size_t bytes = sg->length + sg->offset;
-		size_t unmapped;
-
-		unmapped = iommu_unmap(domain, da, bytes);
-		if (unmapped < bytes)
-			return unmapped;
-
-		VERB("unmap[%d]: %08lx(%zx)", i, da, bytes);
-
-		BUG_ON(!PAGE_ALIGNED(bytes));
 
-		da += bytes;
-	}
+	pm_runtime_get_sync(mmu->dev);
+	iommu_unmap(iommu->domain, iova, len);
+	pm_runtime_put_sync(mmu->dev);
 
 	return 0;
 }
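
The msm_iommu.c rewrite above drops the hand-rolled per-scatterlist-entry mapping loop in favour of iommu_map_sg(), which maps the whole table in one call and returns the number of bytes mapped. A minimal sketch of the calling convention (real IOMMU API of this era, hypothetical wrapper; newer kernels have since changed the signature):

#include <linux/errno.h>
#include <linux/iommu.h>
#include <linux/scatterlist.h>

/* iommu_map_sg() returns the number of bytes successfully mapped;
 * anything short of the expected length is treated as failure here. */
static int example_map_sg(struct iommu_domain *domain, unsigned long iova,
		struct sg_table *sgt, size_t len, int prot)
{
	size_t mapped;

	mapped = iommu_map_sg(domain, iova, sgt->sgl, sgt->nents, prot);

	return (mapped == len) ? 0 : -EINVAL;
}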
diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c
index 3df7322fd74e..0e81faab2c50 100644
--- a/drivers/gpu/drm/msm/msm_rd.c
+++ b/drivers/gpu/drm/msm/msm_rd.c
@@ -322,7 +322,7 @@ void msm_rd_dump_submit(struct msm_gem_submit *submit)
 	}
 
 	for (i = 0; i < submit->nr_cmds; i++) {
-		uint32_t iova = submit->cmd[i].iova;
+		uint64_t iova = submit->cmd[i].iova;
 		uint32_t szd  = submit->cmd[i].size; /* in dwords */
 
 		/* snapshot cmdstream bo's (if we haven't already): */
@@ -341,7 +341,7 @@ void msm_rd_dump_submit(struct msm_gem_submit *submit)
 		case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
 		case MSM_SUBMIT_CMD_BUF:
 			rd_write_section(rd, RD_CMDSTREAM_ADDR,
-					(uint32_t[2]){ iova, szd }, 8);
+					(uint32_t[3]){ iova, szd, iova >> 32 }, 12);
 			break;
 		}
 	}
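
The msm_rd change widens the captured IOVA to 64 bits while keeping the trace format an array of 32-bit words: the low word stays where the old format put it and the high word is appended, so older decoders still see a (truncated) address. The split itself looks like this (standalone C, hypothetical record layout):

#include <stdint.h>

/* Pack a 64-bit address and a size into three 32-bit words: low half,
 * size, then the high half appended for newer readers. */
static void pack_cmdstream_addr(uint64_t iova, uint32_t size_dwords,
		uint32_t out[3])
{
	out[0] = (uint32_t)iova;		/* low 32 bits, as before */
	out[1] = size_dwords;
	out[2] = (uint32_t)(iova >> 32);	/* new: high 32 bits */
}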
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_out.c b/drivers/gpu/drm/mxsfb/mxsfb_out.c
index b8e81422d4e2..f7d729aa09bd 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_out.c
+++ b/drivers/gpu/drm/mxsfb/mxsfb_out.c
@@ -19,6 +19,7 @@
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_fb_cma_helper.h>
 #include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_of.h>
 #include <drm/drm_panel.h>
 #include <drm/drm_plane_helper.h>
 #include <drm/drm_simple_kms_helper.h>
@@ -82,20 +83,15 @@ static const struct drm_connector_funcs mxsfb_panel_connector_funcs = {
 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
 };
 
-static int mxsfb_attach_endpoint(struct drm_device *drm,
-				 const struct of_endpoint *ep)
+int mxsfb_create_output(struct drm_device *drm)
 {
 	struct mxsfb_drm_private *mxsfb = drm->dev_private;
-	struct device_node *np;
 	struct drm_panel *panel;
-	int ret = -EPROBE_DEFER;
-
-	np = of_graph_get_remote_port_parent(ep->local_node);
-	panel = of_drm_find_panel(np);
-	of_node_put(np);
+	int ret;
 
-	if (!panel)
-		return -EPROBE_DEFER;
+	ret = drm_of_find_panel_or_bridge(drm->dev->of_node, 0, 0, &panel, NULL);
+	if (ret)
+		return ret;
 
 	mxsfb->connector.dpms = DRM_MODE_DPMS_OFF;
 	mxsfb->connector.polled = 0;
@@ -109,27 +105,3 @@ static int mxsfb_attach_endpoint(struct drm_device *drm,
 
 	return ret;
 }
-
-int mxsfb_create_output(struct drm_device *drm)
-{
-	struct mxsfb_drm_private *mxsfb = drm->dev_private;
-	struct device_node *ep_np = NULL;
-	struct of_endpoint ep;
-	int ret;
-
-	for_each_endpoint_of_node(drm->dev->of_node, ep_np) {
-		ret = of_graph_parse_endpoint(ep_np, &ep);
-		if (!ret)
-			ret = mxsfb_attach_endpoint(drm, &ep);
-
-		if (ret) {
-			of_node_put(ep_np);
-			return ret;
-		}
-	}
-
-	if (!mxsfb->panel)
-		return -EPROBE_DEFER;
-
-	return 0;
-}
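
The mxsfb rewrite above is typical of this series' OF-graph cleanups: an open-coded walk over endpoint nodes is replaced by drm_of_find_panel_or_bridge(), which resolves the remote panel (or bridge) for a given port/endpoint and returns -EPROBE_DEFER on its own when the remote driver has not bound yet. Roughly, as a hypothetical probe helper:

#include <drm/drmP.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>

/* Look up the panel wired to port 0, endpoint 0 of this device's OF
 * graph. Returns 0 and fills *panel, or a negative error such as
 * -EPROBE_DEFER when the remote device is not ready yet. */
static int example_find_panel(struct drm_device *drm,
		struct drm_panel **panel)
{
	return drm_of_find_panel_or_bridge(drm->dev->of_node, 0, 0,
			panel, NULL);
}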
diff --git a/drivers/gpu/drm/nouveau/dispnv04/crtc.c b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
index 43ab560de7f9..4b4b0b496262 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/crtc.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
@@ -788,7 +788,8 @@ nv_crtc_disable(struct drm_crtc *crtc)
 
 static int
 nv_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
-		  uint32_t size)
+		  uint32_t size,
+		  struct drm_modeset_acquire_ctx *ctx)
 {
 	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
 	int i;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 548f36d33924..e427f80344c4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -1574,6 +1574,7 @@ struct ttm_bo_driver nouveau_bo_driver = {
 	.fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
 	.io_mem_reserve = &nouveau_ttm_io_mem_reserve,
 	.io_mem_free = &nouveau_ttm_io_mem_free,
+	.io_mem_pfn = ttm_bo_default_io_mem_pfn,
 };
 
 struct nvkm_vma *
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index c9910c8537ed..0e58537352fe 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -2210,25 +2210,16 @@ nv50_head_lut_load(struct drm_crtc *crtc)
 	}
 }
 
-static int
-nv50_head_mode_set_base_atomic(struct drm_crtc *crtc,
-			       struct drm_framebuffer *fb, int x, int y,
-			       enum mode_set_atomic state)
-{
-	WARN_ON(1);
-	return 0;
-}
-
 static const struct drm_crtc_helper_funcs
 nv50_head_help = {
-	.mode_set_base_atomic = nv50_head_mode_set_base_atomic,
 	.load_lut = nv50_head_lut_load,
 	.atomic_check = nv50_head_atomic_check,
 };
 
 static int
 nv50_head_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
-		    uint32_t size)
+		    uint32_t size,
+		    struct drm_modeset_acquire_ctx *ctx)
 {
 	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
 	u32 i;
diff --git a/drivers/gpu/drm/omapdrm/dss/dpi.c b/drivers/gpu/drm/omapdrm/dss/dpi.c
index 51d90a8a61cd..8a730a7afe76 100644
--- a/drivers/gpu/drm/omapdrm/dss/dpi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dpi.c
@@ -877,7 +877,7 @@ int dpi_init_port(struct platform_device *pdev, struct device_node *port)
 	if (!dpi)
 		return -ENOMEM;
 
-	ep = omapdss_of_get_next_endpoint(port, NULL);
+	ep = of_get_next_child(port, NULL);
 	if (!ep)
 		return 0;
 
diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c
index f50d6fc0d92e..910754bf8cf9 100644
--- a/drivers/gpu/drm/omapdrm/dss/dsi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dsi.c
@@ -39,6 +39,7 @@
 #include <linux/debugfs.h>
 #include <linux/pm_runtime.h>
 #include <linux/of.h>
+#include <linux/of_graph.h>
 #include <linux/of_platform.h>
 #include <linux/component.h>
 
@@ -5090,7 +5091,7 @@ static int dsi_probe_of(struct platform_device *pdev)
 	struct device_node *ep;
 	struct omap_dsi_pin_config pin_cfg;
 
-	ep = omapdss_of_get_first_endpoint(node);
+	ep = of_graph_get_endpoint_by_regs(node, 0, 0);
 	if (!ep)
 		return 0;
 
diff --git a/drivers/gpu/drm/omapdrm/dss/dss-of.c b/drivers/gpu/drm/omapdrm/dss/dss-of.c
index b46606b0f014..c6b86f348a5c 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss-of.c
+++ b/drivers/gpu/drm/omapdrm/dss/dss-of.c
@@ -16,76 +16,11 @@
 #include <linux/err.h>
 #include <linux/module.h>
 #include <linux/of.h>
+#include <linux/of_graph.h>
 #include <linux/seq_file.h>
 
 #include "omapdss.h"
 
-struct device_node *
-omapdss_of_get_next_port(const struct device_node *parent,
-			 struct device_node *prev)
-{
-	struct device_node *port = NULL;
-
-	if (!parent)
-		return NULL;
-
-	if (!prev) {
-		struct device_node *ports;
-		/*
-		 * It's the first call, we have to find a port subnode
-		 * within this node or within an optional 'ports' node.
-		 */
-		ports = of_get_child_by_name(parent, "ports");
-		if (ports)
-			parent = ports;
-
-		port = of_get_child_by_name(parent, "port");
-
-		/* release the 'ports' node */
-		of_node_put(ports);
-	} else {
-		struct device_node *ports;
-
-		ports = of_get_parent(prev);
-		if (!ports)
-			return NULL;
-
-		do {
-			port = of_get_next_child(ports, prev);
-			if (!port) {
-				of_node_put(ports);
-				return NULL;
-			}
-			prev = port;
-		} while (of_node_cmp(port->name, "port") != 0);
-
-		of_node_put(ports);
-	}
-
-	return port;
-}
-EXPORT_SYMBOL_GPL(omapdss_of_get_next_port);
-
-struct device_node *
-omapdss_of_get_next_endpoint(const struct device_node *parent,
-			     struct device_node *prev)
-{
-	struct device_node *ep = NULL;
-
-	if (!parent)
-		return NULL;
-
-	do {
-		ep = of_get_next_child(parent, prev);
-		if (!ep)
-			return NULL;
-		prev = ep;
-	} while (of_node_cmp(ep->name, "endpoint") != 0);
-
-	return ep;
-}
-EXPORT_SYMBOL_GPL(omapdss_of_get_next_endpoint);
-
 struct device_node *dss_of_port_get_parent_device(struct device_node *port)
 {
 	struct device_node *np;
@@ -124,37 +59,6 @@ u32 dss_of_port_get_port_number(struct device_node *port)
 }
 EXPORT_SYMBOL_GPL(dss_of_port_get_port_number);
 
-static struct device_node *omapdss_of_get_remote_port(const struct device_node *node)
-{
-	struct device_node *np;
-
-	np = of_parse_phandle(node, "remote-endpoint", 0);
-	if (!np)
-		return NULL;
-
-	np = of_get_next_parent(np);
-
-	return np;
-}
-
-struct device_node *
-omapdss_of_get_first_endpoint(const struct device_node *parent)
-{
-	struct device_node *port, *ep;
-
-	port = omapdss_of_get_next_port(parent, NULL);
-
-	if (!port)
-		return NULL;
-
-	ep = omapdss_of_get_next_endpoint(port, NULL);
-
-	of_node_put(port);
-
-	return ep;
-}
-EXPORT_SYMBOL_GPL(omapdss_of_get_first_endpoint);
-
 struct omap_dss_device *
 omapdss_of_find_source_for_first_ep(struct device_node *node)
 {
@@ -162,11 +66,11 @@ omapdss_of_find_source_for_first_ep(struct device_node *node)
 	struct device_node *src_port;
 	struct omap_dss_device *src;
 
-	ep = omapdss_of_get_first_endpoint(node);
+	ep = of_graph_get_endpoint_by_regs(node, 0, 0);
 	if (!ep)
 		return ERR_PTR(-EINVAL);
 
-	src_port = omapdss_of_get_remote_port(ep);
+	src_port = of_graph_get_remote_port(ep);
 	if (!src_port) {
 		of_node_put(ep);
 		return ERR_PTR(-EINVAL);
diff --git a/drivers/gpu/drm/omapdrm/dss/dss.c b/drivers/gpu/drm/omapdrm/dss/dss.c
index ceb483650f8c..fa99ec72d832 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss.c
+++ b/drivers/gpu/drm/omapdrm/dss/dss.c
@@ -38,6 +38,7 @@
 #include <linux/mfd/syscon.h>
 #include <linux/regmap.h>
 #include <linux/of.h>
+#include <linux/of_graph.h>
 #include <linux/regulator/consumer.h>
 #include <linux/suspend.h>
 #include <linux/component.h>
@@ -1035,32 +1036,14 @@ static int dss_init_ports(struct platform_device *pdev)
 {
 	struct device_node *parent = pdev->dev.of_node;
 	struct device_node *port;
-	int r;
-
-	if (parent == NULL)
-		return 0;
+	int i;
 
-	port = omapdss_of_get_next_port(parent, NULL);
-	if (!port)
-		return 0;
-
-	if (dss.feat->num_ports == 0)
-		return 0;
-
-	do {
-		enum omap_display_type port_type;
-		u32 reg;
-
-		r = of_property_read_u32(port, "reg", &reg);
-		if (r)
-			reg = 0;
-
-		if (reg >= dss.feat->num_ports)
+	for (i = 0; i < dss.feat->num_ports; i++) {
+		port = of_graph_get_port_by_id(parent, i);
+		if (!port)
 			continue;
 
-		port_type = dss.feat->ports[reg];
-
-		switch (port_type) {
+		switch (dss.feat->ports[i]) {
 		case OMAP_DISPLAY_TYPE_DPI:
 			dpi_init_port(pdev, port);
 			break;
@@ -1070,7 +1053,7 @@ static int dss_init_ports(struct platform_device *pdev)
 		default:
 			break;
 		}
-	} while ((port = omapdss_of_get_next_port(parent, port)) != NULL);
+	}
 
 	return 0;
 }
@@ -1079,32 +1062,14 @@ static void dss_uninit_ports(struct platform_device *pdev)
 {
 	struct device_node *parent = pdev->dev.of_node;
 	struct device_node *port;
+	int i;
 
-	if (parent == NULL)
-		return;
-
-	port = omapdss_of_get_next_port(parent, NULL);
-	if (!port)
-		return;
-
-	if (dss.feat->num_ports == 0)
-		return;
-
-	do {
-		enum omap_display_type port_type;
-		u32 reg;
-		int r;
-
-		r = of_property_read_u32(port, "reg", &reg);
-		if (r)
-			reg = 0;
-
-		if (reg >= dss.feat->num_ports)
+	for (i = 0; i < dss.feat->num_ports; i++) {
+		port = of_graph_get_port_by_id(parent, i);
+		if (!port)
 			continue;
 
-		port_type = dss.feat->ports[reg];
-
-		switch (port_type) {
+		switch (dss.feat->ports[i]) {
 		case OMAP_DISPLAY_TYPE_DPI:
 			dpi_uninit_port(port);
 			break;
@@ -1114,7 +1079,7 @@ static void dss_uninit_ports(struct platform_device *pdev)
 		default:
 			break;
 		}
-	} while ((port = omapdss_of_get_next_port(parent, port)) != NULL);
+	}
 }
 
 static int dss_video_pll_probe(struct platform_device *pdev)
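
dss_init_ports()/dss_uninit_ports() above show the other half of the OF-graph conversion: instead of a driver-private do/while walk over port nodes, the loop indexes ports directly with of_graph_get_port_by_id() and lets the feature table bound the iteration. The shape of that loop as a sketch, with a hypothetical port handler (note the sketch also drops the node reference, which the original loop leaves to the caller):

#include <linux/of.h>
#include <linux/of_graph.h>

/* Visit each port the hardware actually has; ports missing from the
 * device tree are simply skipped. */
static void example_init_ports(struct device_node *parent, int num_ports)
{
	struct device_node *port;
	int i;

	for (i = 0; i < num_ports; i++) {
		port = of_graph_get_port_by_id(parent, i);
		if (!port)
			continue;

		/* ... hand the port node to the matching output driver ... */

		of_node_put(port);
	}
}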
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4.c b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
index e7162c16de2e..87c53034c634 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
@@ -34,6 +34,7 @@
 #include <linux/regulator/consumer.h>
 #include <linux/component.h>
 #include <linux/of.h>
+#include <linux/of_graph.h>
 #include <sound/omap-hdmi-audio.h>
 
 #include "omapdss.h"
@@ -546,7 +547,7 @@ static int hdmi_probe_of(struct platform_device *pdev)
 	struct device_node *ep;
 	int r;
 
-	ep = omapdss_of_get_first_endpoint(node);
+	ep = of_graph_get_endpoint_by_regs(node, 0, 0);
 	if (!ep)
 		return 0;
 
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5.c b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
index 678dfb02764a..d13dce7e8079 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi5.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
@@ -39,6 +39,7 @@
 #include <linux/regulator/consumer.h>
 #include <linux/component.h>
 #include <linux/of.h>
+#include <linux/of_graph.h>
 #include <sound/omap-hdmi-audio.h>
 
 #include "omapdss.h"
@@ -572,7 +573,7 @@ static int hdmi_probe_of(struct platform_device *pdev)
 	struct device_node *ep;
 	int r;
 
-	ep = omapdss_of_get_first_endpoint(node);
+	ep = of_graph_get_endpoint_by_regs(node, 0, 0);
 	if (!ep)
 		return 0;
 
diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss.h b/drivers/gpu/drm/omapdrm/dss/omapdss.h
index 63c2684f889b..b19dae1fd6c5 100644
--- a/drivers/gpu/drm/omapdrm/dss/omapdss.h
+++ b/drivers/gpu/drm/omapdrm/dss/omapdss.h
@@ -830,17 +830,6 @@ static inline bool omapdss_device_is_enabled(struct omap_dss_device *dssdev)
 	return dssdev->state == OMAP_DSS_DISPLAY_ACTIVE;
 }
 
-struct device_node *
-omapdss_of_get_next_port(const struct device_node *parent,
-			 struct device_node *prev);
-
-struct device_node *
-omapdss_of_get_next_endpoint(const struct device_node *parent,
-			     struct device_node *prev);
-
-struct device_node *
-omapdss_of_get_first_endpoint(const struct device_node *parent);
-
 struct omap_dss_device *
 omapdss_of_find_source_for_first_ep(struct device_node *node);
 
diff --git a/drivers/gpu/drm/omapdrm/dss/sdi.c b/drivers/gpu/drm/omapdrm/dss/sdi.c
index b3bda2d3c08d..0620b9f8c231 100644
--- a/drivers/gpu/drm/omapdrm/dss/sdi.c
+++ b/drivers/gpu/drm/omapdrm/dss/sdi.c
@@ -414,7 +414,7 @@ int sdi_init_port(struct platform_device *pdev, struct device_node *port)
 	u32 datapairs;
 	int r;
 
-	ep = omapdss_of_get_next_endpoint(port, NULL);
+	ep = of_get_next_child(port, NULL);
 	if (!ep)
 		return 0;
 
diff --git a/drivers/gpu/drm/omapdrm/dss/venc.c b/drivers/gpu/drm/omapdrm/dss/venc.c
index d74f7fcc2e46..19d14957f566 100644
--- a/drivers/gpu/drm/omapdrm/dss/venc.c
+++ b/drivers/gpu/drm/omapdrm/dss/venc.c
@@ -35,6 +35,7 @@
 #include <linux/regulator/consumer.h>
 #include <linux/pm_runtime.h>
 #include <linux/of.h>
+#include <linux/of_graph.h>
 #include <linux/component.h>
 
 #include "omapdss.h"
@@ -818,7 +819,7 @@ static int venc_probe_of(struct platform_device *pdev)
 	u32 channels;
 	int r;
 
-	ep = omapdss_of_get_first_endpoint(node);
+	ep = of_graph_get_endpoint_by_regs(node, 0, 0);
 	if (!ep)
 		return 0;
 
diff --git a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
index ee5883f59be5..0dbe0306953d 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
@@ -160,10 +160,10 @@ static struct dma_buf_ops omap_dmabuf_ops = {
 	.release = omap_gem_dmabuf_release,
 	.begin_cpu_access = omap_gem_dmabuf_begin_cpu_access,
 	.end_cpu_access = omap_gem_dmabuf_end_cpu_access,
-	.kmap_atomic = omap_gem_dmabuf_kmap_atomic,
-	.kunmap_atomic = omap_gem_dmabuf_kunmap_atomic,
-	.kmap = omap_gem_dmabuf_kmap,
-	.kunmap = omap_gem_dmabuf_kunmap,
+	.map_atomic = omap_gem_dmabuf_kmap_atomic,
+	.unmap_atomic = omap_gem_dmabuf_kunmap_atomic,
+	.map = omap_gem_dmabuf_kmap,
+	.unmap = omap_gem_dmabuf_kunmap,
 	.mmap = omap_gem_dmabuf_mmap,
 };
 
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index 5dc2106da2bc..3e29a9903303 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -62,6 +62,12 @@ config DRM_PANEL_PANASONIC_VVX10F034N00
 	  WUXGA (1920x1200) Novatek NT1397-based DSI panel as found in some
 	  Xperia Z2 tablets
 
+config DRM_PANEL_SAMSUNG_S6E3HA2
+	tristate "Samsung S6E3HA2 DSI video mode panel"
+	depends on OF
+	depends on DRM_MIPI_DSI
+	select VIDEOMODE_HELPERS
+
 config DRM_PANEL_SAMSUNG_S6E8AA0
 	tristate "Samsung S6E8AA0 DSI video mode panel"
 	depends on OF
@@ -91,4 +97,11 @@ config DRM_PANEL_SHARP_LS043T1LE01
 	  Say Y here if you want to enable support for Sharp LS043T1LE01 qHD
 	  (540x960) DSI panel as found on the Qualcomm APQ8074 Dragonboard
 
+config DRM_PANEL_SITRONIX_ST7789V
+	tristate "Sitronix ST7789V panel"
+	depends on OF && SPI
+	help
+	  Say Y here if you want to enable support for the Sitronix
+	  ST7789V controller for 240x320 LCD panels
+
 endmenu
diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile
index 20b5060d1f47..292b3c77aede 100644
--- a/drivers/gpu/drm/panel/Makefile
+++ b/drivers/gpu/drm/panel/Makefile
@@ -4,6 +4,8 @@ obj-$(CONFIG_DRM_PANEL_JDI_LT070ME05000) += panel-jdi-lt070me05000.o
 obj-$(CONFIG_DRM_PANEL_LG_LG4573) += panel-lg-lg4573.o
 obj-$(CONFIG_DRM_PANEL_PANASONIC_VVX10F034N00) += panel-panasonic-vvx10f034n00.o
 obj-$(CONFIG_DRM_PANEL_SAMSUNG_LD9040) += panel-samsung-ld9040.o
+obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E3HA2) += panel-samsung-s6e3ha2.o
 obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0) += panel-samsung-s6e8aa0.o
 obj-$(CONFIG_DRM_PANEL_SHARP_LQ101R1SX01) += panel-sharp-lq101r1sx01.o
 obj-$(CONFIG_DRM_PANEL_SHARP_LS043T1LE01) += panel-sharp-ls043t1le01.o
+obj-$(CONFIG_DRM_PANEL_SITRONIX_ST7789V) += panel-sitronix-st7789v.o
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c b/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c
new file mode 100644
index 000000000000..4cc08d7b3de4
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c
@@ -0,0 +1,739 @@
+/*
+ * MIPI-DSI based s6e3ha2 AMOLED 5.7 inch panel driver.
+ *
+ * Copyright (c) 2016 Samsung Electronics Co., Ltd.
+ * Donghwa Lee <dh09.lee@samsung.com>
+ * Hyungwon Hwang <human.hwang@samsung.com>
+ * Hoegeun Kwon <hoegeun.kwon@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_panel.h>
+#include <linux/backlight.h>
+#include <linux/gpio/consumer.h>
+#include <linux/regulator/consumer.h>
+
+#define S6E3HA2_MIN_BRIGHTNESS		0
+#define S6E3HA2_MAX_BRIGHTNESS		100
+#define S6E3HA2_DEFAULT_BRIGHTNESS	80
+
+#define S6E3HA2_NUM_GAMMA_STEPS		46
+#define S6E3HA2_GAMMA_CMD_CNT		35
+#define S6E3HA2_VINT_STATUS_MAX		10
+
+static const u8 gamma_tbl[S6E3HA2_NUM_GAMMA_STEPS][S6E3HA2_GAMMA_CMD_CNT] = {
+	{ 0x00, 0xb8, 0x00, 0xc3, 0x00, 0xb1, 0x89, 0x87, 0x87, 0x82, 0x83,
+	  0x85, 0x88, 0x8b, 0x8b, 0x84, 0x88, 0x82, 0x82, 0x89, 0x86, 0x8c,
+	  0x94, 0x84, 0xb1, 0xaf, 0x8e, 0xcf, 0xad, 0xc9, 0x00, 0x00, 0x00,
+	  0x00, 0x00 },
+	{ 0x00, 0xb8, 0x00, 0xc3, 0x00, 0xb1, 0x89, 0x87, 0x87, 0x84, 0x84,
+	  0x85, 0x87, 0x8b, 0x8a, 0x84, 0x88, 0x82, 0x82, 0x89, 0x86, 0x8a,
+	  0x93, 0x84, 0xb0, 0xae, 0x8e, 0xc9, 0xa8, 0xc5, 0x00, 0x00, 0x00,
+	  0x00, 0x00 },
+	{ 0x00, 0xb8, 0x00, 0xc3, 0x00, 0xb1, 0x89, 0x87, 0x87, 0x83, 0x83,
+	  0x85, 0x86, 0x8a, 0x8a, 0x84, 0x88, 0x81, 0x84, 0x8a, 0x88, 0x8a,
+	  0x91, 0x84, 0xb1, 0xae, 0x8b, 0xd5, 0xb2, 0xcc, 0x00, 0x00, 0x00,
+	  0x00, 0x00 },
+	{ 0x00, 0xb8, 0x00, 0xc3, 0x00, 0xb1, 0x89, 0x87, 0x87, 0x83, 0x83,
+	  0x85, 0x86, 0x8a, 0x8a, 0x84, 0x87, 0x81, 0x84, 0x8a, 0x87, 0x8a,
+	  0x91, 0x85, 0xae, 0xac, 0x8a, 0xc3, 0xa3, 0xc0, 0x00, 0x00, 0x00,
+	  0x00, 0x00 },
+	{ 0x00, 0xb8, 0x00, 0xc3, 0x00, 0xb1, 0x88, 0x86, 0x87, 0x85, 0x85,
+	  0x86, 0x85, 0x88, 0x89, 0x84, 0x89, 0x82, 0x84, 0x87, 0x85, 0x8b,
+	  0x91, 0x88, 0xad, 0xab, 0x8a, 0xb7, 0x9b, 0xb6, 0x00, 0x00, 0x00,
+	  0x00, 0x00 },
+	{ 0x00, 0xb8, 0x00, 0xc3, 0x00, 0xb1, 0x89, 0x87, 0x87, 0x83, 0x83,
+	  0x85, 0x86, 0x89, 0x8a, 0x84, 0x89, 0x83, 0x83, 0x86, 0x84, 0x8b,
+	  0x90, 0x84, 0xb0, 0xae, 0x8b, 0xce, 0xad, 0xc8, 0x00, 0x00, 0x00,
+	  0x00, 0x00 },
+	{ 0x00, 0xb8, 0x00, 0xc3, 0x00, 0xb1, 0x89, 0x87, 0x87, 0x83, 0x83,
+	  0x85, 0x87, 0x89, 0x8a, 0x83, 0x87, 0x82, 0x85, 0x88, 0x87, 0x89,
+	  0x8f, 0x84, 0xac, 0xaa, 0x89, 0xb1, 0x98, 0xaf, 0x00, 0x00, 0x00,
+	  0x00, 0x00 },
+	{ 0x00, 0xb8, 0x00, 0xc3, 0x00, 0xb1, 0x89, 0x87, 0x87, 0x83, 0x83,
+	  0x85, 0x86, 0x88, 0x89, 0x84, 0x88, 0x83, 0x82, 0x85, 0x84, 0x8c,
+	  0x91, 0x86, 0xac, 0xaa, 0x89, 0xc2, 0xa5, 0xbd, 0x00, 0x00, 0x00,
+	  0x00, 0x00 },
+	{ 0x00, 0xb8, 0x00, 0xc3, 0x00, 0xb1, 0x88, 0x86, 0x87, 0x84, 0x84,
+	  0x85, 0x87, 0x89, 0x8a, 0x83, 0x87, 0x82, 0x85, 0x88, 0x87, 0x88,
+	  0x8b, 0x82, 0xad, 0xaa, 0x8a, 0xc2, 0xa5, 0xbd, 0x00, 0x00, 0x00,
+	  0x00, 0x00 },
+	{ 0x00, 0xb8, 0x00, 0xc3, 0x00, 0xb1, 0x89, 0x87, 0x87, 0x83, 0x83,
+	  0x85, 0x86, 0x87, 0x89, 0x84, 0x88, 0x83, 0x82, 0x85, 0x84, 0x8a,
+	  0x8e, 0x84, 0xae, 0xac, 0x89, 0xda, 0xb7, 0xd0, 0x00, 0x00, 0x00,
+	  0x00, 0x00 },
+	{ 0x00, 0xb8, 0x00, 0xc3, 0x00, 0xb1, 0x88, 0x86, 0x87, 0x84, 0x84,
+	  0x85, 0x86, 0x87, 0x89, 0x84, 0x88, 0x83, 0x80, 0x83, 0x82, 0x8b,
+	  0x8e, 0x85, 0xac, 0xaa, 0x89, 0xc8, 0xaa, 0xc1, 0x00, 0x00, 0x00,
+	  0x00, 0x00 },
+	{ 0x00, 0xb8, 0x00, 0xc3, 0x00, 0xb1, 0x88, 0x86, 0x87, 0x84, 0x84,
+	  0x85, 0x86, 0x87, 0x89, 0x81, 0x85, 0x81, 0x84, 0x86, 0x84, 0x8c,
+	  0x8c, 0x84, 0xa9, 0xa8, 0x87, 0xa3, 0x92, 0xa1, 0x00, 0x00, 0x00,
+	  0x00, 0x00 },
+	{ 0x00, 0xb8, 0x00, 0xc3, 0x00, 0xb1, 0x88, 0x86, 0x87, 0x84, 0x84,
+	  0x85, 0x86, 0x87, 0x89, 0x84, 0x86, 0x83, 0x80, 0x83, 0x81, 0x8c,
+	  0x8d, 0x84, 0xaa, 0xaa, 0x89, 0xce, 0xaf, 0xc5, 0x00, 0x00, 0x00,
+	  0x00, 0x00 },
+	{ 0x00, 0xb8, 0x00, 0xc3, 0x00, 0xb1, 0x88, 0x86, 0x87, 0x84, 0x84,
+	  0x85, 0x86, 0x87, 0x89, 0x81, 0x83, 0x80, 0x83, 0x85, 0x85, 0x8c,
+	  0x8c, 0x84, 0xa8, 0xa8, 0x88, 0xb5, 0x9f, 0xb0, 0x00, 0x00, 0x00,
+	  0x00, 0x00 },
+	{ 0x00, 0xb8, 0x00, 0xc3, 0x00, 0xb1, 0x88, 0x86, 0x87, 0x84, 0x84,
+	  0x86, 0x86, 0x87, 0x88, 0x81, 0x83, 0x80, 0x83, 0x85, 0x85, 0x8c,
+	  0x8b, 0x84, 0xab, 0xa8, 0x86, 0xd4, 0xb4, 0xc9, 0x00, 0x00, 0x00,
+	  0x00, 0x00 },
+	{ 0x00, 0xb8, 0x00, 0xc3, 0x00, 0xb1, 0x88, 0x86, 0x87, 0x84, 0x84,
+	  0x86, 0x86, 0x87, 0x88, 0x81, 0x83, 0x80, 0x84, 0x84, 0x85, 0x8b,
+	  0x8a, 0x83, 0xa6, 0xa5, 0x84, 0xbb, 0xa4, 0xb3, 0x00, 0x00, 0x00,
+	  0x00, 0x00 },
+	{ 0x00, 0xb8, 0x00, 0xc3, 0x00, 0xb1, 0x88, 0x86, 0x87, 0x84, 0x84,
+	  0x86, 0x85, 0x86, 0x86, 0x82, 0x85, 0x81, 0x82, 0x83, 0x84, 0x8e,
+	  0x8b, 0x83, 0xa4, 0xa3, 0x8a, 0xa1, 0x93, 0x9d, 0x00, 0x00, 0x00,
+	  0x00, 0x00 },
+	{ 0x00, 0xb8, 0x00, 0xc3, 0x00, 0xb1, 0x88, 0x86, 0x87, 0x83, 0x83,
+	  0x85, 0x86, 0x87, 0x87, 0x82, 0x85, 0x81, 0x82, 0x82, 0x84, 0x8e,
+	  0x8b, 0x83, 0xa4, 0xa2, 0x86, 0xc1, 0xa9, 0xb7, 0x00, 0x00, 0x00,
+	  0x00, 0x00 },
+	{ 0x00, 0xb8, 0x00, 0xc3, 0x00, 0xb1, 0x88, 0x86, 0x87, 0x83, 0x83,
+	  0x85, 0x86, 0x87, 0x87, 0x82, 0x85, 0x81, 0x82, 0x82, 0x84, 0x8d,
+	  0x89, 0x82, 0xa2, 0xa1, 0x84, 0xa7, 0x98, 0xa1, 0x00, 0x00, 0x00,
+	  0x00, 0x00 },
+	{ 0x00, 0xb8, 0x00, 0xc3, 0x00, 0xb1, 0x88, 0x86, 0x87, 0x83, 0x83,
+	  0x85, 0x86, 0x87, 0x87, 0x82, 0x85, 0x81, 0x83, 0x83, 0x85, 0x8c,
+	  0x87, 0x7f, 0xa2, 0x9d, 0x88, 0x8d, 0x88, 0x8b, 0x00, 0x00, 0x00,
+	  0x00, 0x00 },
+	{ 0x00, 0xbb, 0x00, 0xc5, 0x00, 0xb4, 0x87, 0x86, 0x86, 0x84, 0x83,
+	  0x86, 0x87, 0x87, 0x87, 0x80, 0x82, 0x7f, 0x86, 0x86, 0x88, 0x8a,
+	  0x84, 0x7e, 0x9d, 0x9c, 0x82, 0x8d, 0x88, 0x8b, 0x00, 0x00, 0x00,
+	  0x00, 0x00 },
+	{ 0x00, 0xbd, 0x00, 0xc7, 0x00, 0xb7, 0x87, 0x85, 0x85, 0x84, 0x83,
+	  0x86, 0x86, 0x86, 0x88, 0x81, 0x83, 0x80, 0x83, 0x84, 0x85, 0x8a,
+	  0x85, 0x7e, 0x9c, 0x9b, 0x85, 0x80, 0x80, 0x80, 0x00, 0x00, 0x00,
+	  0x00, 0x00 },
+	{ 0x00, 0xc0, 0x00, 0xca, 0x00, 0xbb, 0x87, 0x86, 0x85, 0x83, 0x83,
+	  0x85, 0x86, 0x86, 0x88, 0x81, 0x83, 0x80, 0x84, 0x85, 0x86, 0x89,
+	  0x83, 0x7d, 0x9c, 0x99, 0x87, 0x7b, 0x7b, 0x7c, 0x00, 0x00, 0x00,
+	  0x00, 0x00 },
+	{ 0x00, 0xc4, 0x00, 0xcd, 0x00, 0xbe, 0x87, 0x86, 0x85, 0x83, 0x83,
+	  0x86, 0x85, 0x85, 0x87, 0x81, 0x82, 0x80, 0x82, 0x82, 0x83, 0x8a,
+	  0x85, 0x7f, 0x9f, 0x9b, 0x86, 0xb4, 0xa1, 0xac, 0x00, 0x00, 0x00,
+	  0x00, 0x00 },
+	{ 0x00, 0xc7, 0x00, 0xd0, 0x00, 0xc2, 0x87, 0x85, 0x85, 0x83, 0x82,
+	  0x85, 0x85, 0x85, 0x86, 0x82, 0x83, 0x80, 0x82, 0x82, 0x84, 0x87,
+	  0x86, 0x80, 0x9e, 0x9a, 0x87, 0xa7, 0x98, 0xa1, 0x00, 0x00, 0x00,
+	  0x00, 0x00 },
+	{ 0x00, 0xca, 0x00, 0xd2, 0x00, 0xc5, 0x87, 0x85, 0x84, 0x82, 0x82,
+	  0x84, 0x85, 0x85, 0x86, 0x81, 0x82, 0x7f, 0x82, 0x82, 0x84, 0x88,
+	  0x86, 0x81, 0x9d, 0x98, 0x86, 0x8d, 0x88, 0x8b, 0x00, 0x00, 0x00,
+	  0x00, 0x00 },
+	{ 0x00, 0xce, 0x00, 0xd6, 0x00, 0xca, 0x86, 0x85, 0x84, 0x83, 0x83,
+	  0x85, 0x84, 0x84, 0x85, 0x81, 0x82, 0x80, 0x81, 0x81, 0x82, 0x89,
+	  0x86, 0x81, 0x9c, 0x97, 0x86, 0xa7, 0x98, 0xa1, 0x00, 0x00, 0x00,
+	  0x00, 0x00 },
+	{ 0x00, 0xd1, 0x00, 0xd9, 0x00, 0xce, 0x86, 0x84, 0x83, 0x83, 0x82,
+	  0x85, 0x85, 0x85, 0x86, 0x81, 0x83, 0x81, 0x82, 0x82, 0x83, 0x86,
+	  0x83, 0x7f, 0x99, 0x95, 0x86, 0xbb, 0xa4, 0xb3, 0x00, 0x00, 0x00,
+	  0x00, 0x00 },
+	{ 0x00, 0xd4, 0x00, 0xdb, 0x00, 0xd1, 0x86, 0x85, 0x83, 0x83, 0x82,
+	  0x85, 0x84, 0x84, 0x85, 0x80, 0x83, 0x82, 0x80, 0x80, 0x81, 0x87,
+	  0x84, 0x81, 0x98, 0x93, 0x85, 0xae, 0x9c, 0xa8, 0x00, 0x00, 0x00,
+	  0x00, 0x00 },
+	{ 0x00, 0xd8, 0x00, 0xde, 0x00, 0xd6, 0x86, 0x84, 0x83, 0x81, 0x81,
+	  0x83, 0x85, 0x85, 0x85, 0x82, 0x83, 0x81, 0x81, 0x81, 0x83, 0x86,
+	  0x84, 0x80, 0x98, 0x91, 0x85, 0x7b, 0x7b, 0x7c, 0x00, 0x00, 0x00,
+	  0x00, 0x00 },
+	{ 0x00, 0xdc, 0x00, 0xe2, 0x00, 0xda, 0x85, 0x84, 0x83, 0x82, 0x82,
+	  0x84, 0x84, 0x84, 0x85, 0x81, 0x82, 0x82, 0x80, 0x80, 0x81, 0x83,
+	  0x82, 0x7f, 0x99, 0x93, 0x86, 0x94, 0x8b, 0x92, 0x00, 0x00, 0x00,
+	  0x00, 0x00 },
+	{ 0x00, 0xdf, 0x00, 0xe5, 0x00, 0xde, 0x85, 0x84, 0x82, 0x82, 0x82,
+	  0x84, 0x83, 0x83, 0x84, 0x81, 0x81, 0x80, 0x83, 0x82, 0x84, 0x82,
+	  0x81, 0x7f, 0x99, 0x92, 0x86, 0x7b, 0x7b, 0x7c, 0x00, 0x00, 0x00,
+	  0x00, 0x00 },
+	{ 0x00, 0xe4, 0x00, 0xe9, 0x00, 0xe3, 0x84, 0x83, 0x82, 0x81, 0x81,
+	  0x82, 0x83, 0x83, 0x84, 0x80, 0x81, 0x80, 0x83, 0x83, 0x84, 0x80,
+	  0x81, 0x7c, 0x99, 0x92, 0x87, 0xa1, 0x93, 0x9d, 0x00, 0x00, 0x00,
+	  0x00, 0x00 },
+	{ 0x00, 0xe4, 0x00, 0xe9, 0x00, 0xe3, 0x85, 0x84, 0x83, 0x81, 0x81,
+	  0x82, 0x82, 0x82, 0x83, 0x80, 0x81, 0x80, 0x81, 0x80, 0x82, 0x83,
+	  0x82, 0x80, 0x91, 0x8d, 0x83, 0x9a, 0x90, 0x96, 0x00, 0x00, 0x00,
+	  0x00, 0x00 },
+	{ 0x00, 0xe4, 0x00, 0xe9, 0x00, 0xe3, 0x84, 0x83, 0x82, 0x81, 0x81,
+	  0x82, 0x83, 0x83, 0x84, 0x80, 0x81, 0x80, 0x81, 0x80, 0x82, 0x83,
+	  0x81, 0x7f, 0x91, 0x8c, 0x82, 0x8d, 0x88, 0x8b, 0x00, 0x00, 0x00,
+	  0x00, 0x00 },
+	{ 0x00, 0xe4, 0x00, 0xe9, 0x00, 0xe3, 0x84, 0x83, 0x82, 0x81, 0x81,
+	  0x82, 0x83, 0x83, 0x83, 0x82, 0x82, 0x81, 0x81, 0x80, 0x82, 0x82,
+	  0x82, 0x7f, 0x94, 0x89, 0x84, 0x80, 0x80, 0x80, 0x00, 0x00, 0x00,
+	  0x00, 0x00 },
+	{ 0x00, 0xe4, 0x00, 0xe9, 0x00, 0xe3, 0x84, 0x83, 0x82, 0x81, 0x81,
+	  0x82, 0x83, 0x83, 0x83, 0x82, 0x82, 0x81, 0x81, 0x80, 0x82, 0x83,
+	  0x82, 0x7f, 0x91, 0x85, 0x81, 0x80, 0x80, 0x80, 0x00, 0x00, 0x00,
+	  0x00, 0x00 },
+	{ 0x00, 0xe4, 0x00, 0xe9, 0x00, 0xe3, 0x84, 0x83, 0x82, 0x81, 0x81,
+	  0x82, 0x83, 0x83, 0x83, 0x80, 0x80, 0x7f, 0x83, 0x82, 0x84, 0x83,
+	  0x82, 0x7f, 0x90, 0x84, 0x81, 0x9a, 0x90, 0x96, 0x00, 0x00, 0x00,
+	  0x00, 0x00 },
+	{ 0x00, 0xe4, 0x00, 0xe9, 0x00, 0xe3, 0x84, 0x83, 0x82, 0x80, 0x80,
+	  0x82, 0x83, 0x83, 0x83, 0x80, 0x80, 0x7f, 0x80, 0x80, 0x81, 0x81,
+	  0x82, 0x83, 0x7e, 0x80, 0x7c, 0xa4, 0x97, 0x9f, 0x00, 0x00, 0x00,
+	  0x00, 0x00 },
+	{ 0x00, 0xe9, 0x00, 0xec, 0x00, 0xe8, 0x84, 0x83, 0x82, 0x81, 0x81,
+	  0x82, 0x82, 0x82, 0x83, 0x7f, 0x7f, 0x7f, 0x81, 0x80, 0x82, 0x83,
+	  0x83, 0x84, 0x79, 0x7c, 0x79, 0xb1, 0xa0, 0xaa, 0x00, 0x00, 0x00,
+	  0x00, 0x00 },
+	{ 0x00, 0xed, 0x00, 0xf0, 0x00, 0xec, 0x83, 0x83, 0x82, 0x80, 0x80,
+	  0x81, 0x82, 0x82, 0x82, 0x7f, 0x7f, 0x7e, 0x81, 0x81, 0x82, 0x80,
+	  0x81, 0x81, 0x84, 0x84, 0x83, 0x80, 0x80, 0x80, 0x00, 0x00, 0x00,
+	  0x00, 0x00 },
+	{ 0x00, 0xf1, 0x00, 0xf4, 0x00, 0xf1, 0x83, 0x82, 0x82, 0x80, 0x80,
+	  0x81, 0x82, 0x82, 0x82, 0x80, 0x80, 0x80, 0x80, 0x80, 0x81, 0x7d,
+	  0x7e, 0x7f, 0x84, 0x84, 0x83, 0x80, 0x80, 0x80, 0x00, 0x00, 0x00,
+	  0x00, 0x00 },
+	{ 0x00, 0xf6, 0x00, 0xf7, 0x00, 0xf5, 0x82, 0x82, 0x81, 0x80, 0x80,
+	  0x80, 0x82, 0x82, 0x82, 0x80, 0x80, 0x80, 0x7f, 0x7f, 0x7f, 0x82,
+	  0x82, 0x82, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x00, 0x00, 0x00,
+	  0x00, 0x00 },
+	{ 0x00, 0xfa, 0x00, 0xfb, 0x00, 0xfa, 0x81, 0x81, 0x81, 0x80, 0x80,
+	  0x80, 0x82, 0x82, 0x82, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80,
+	  0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x00, 0x00, 0x00,
+	  0x00, 0x00 },
+	{ 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0x80, 0x80, 0x80, 0x80, 0x80,
+	  0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80,
+	  0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x00, 0x00, 0x00,
209 0x00, 0x00 },
210 { 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0x80, 0x80, 0x80, 0x80, 0x80,
211 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80,
212 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x00, 0x00, 0x00,
213 0x00, 0x00 }
214};
215
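/*
 * Candidate VINT levels; s6e3ha2_set_vint() scales the current backlight
 * brightness into an index into this table.
 */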
216static const unsigned char vint_table[S6E3HA2_VINT_STATUS_MAX] = {
217 0x18, 0x19, 0x1a, 0x1b, 0x1c,
218 0x1d, 0x1e, 0x1f, 0x20, 0x21
219};
220
221struct s6e3ha2 {
222 struct device *dev;
223 struct drm_panel panel;
224 struct backlight_device *bl_dev;
225
226 struct regulator_bulk_data supplies[2];
227 struct gpio_desc *reset_gpio;
228 struct gpio_desc *enable_gpio;
229};
230
231static int s6e3ha2_dcs_write(struct s6e3ha2 *ctx, const void *data, size_t len)
232{
233 struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
234
235 return mipi_dsi_dcs_write_buffer(dsi, data, len);
236}
237
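/*
 * Note: on a failed DCS write, both helper macros below return from the
 * *calling* function, which is why their call sites need no error checks.
 */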
238#define s6e3ha2_dcs_write_seq_static(ctx, seq...) do { \
239 static const u8 d[] = { seq }; \
240 int ret; \
241 ret = s6e3ha2_dcs_write(ctx, d, ARRAY_SIZE(d)); \
242 if (ret < 0) \
243 return ret; \
244} while (0)
245
246#define s6e3ha2_call_write_func(ret, func) do { \
247 ret = (func); \
248 if (ret < 0) \
249 return ret; \
250} while (0)
251
252static int s6e3ha2_test_key_on_f0(struct s6e3ha2 *ctx)
253{
254 s6e3ha2_dcs_write_seq_static(ctx, 0xf0, 0x5a, 0x5a);
255 return 0;
256}
257
258static int s6e3ha2_test_key_off_f0(struct s6e3ha2 *ctx)
259{
260 s6e3ha2_dcs_write_seq_static(ctx, 0xf0, 0xa5, 0xa5);
261 return 0;
262}
263
264static int s6e3ha2_test_key_on_fc(struct s6e3ha2 *ctx)
265{
266 s6e3ha2_dcs_write_seq_static(ctx, 0xfc, 0x5a, 0x5a);
267 return 0;
268}
269
270static int s6e3ha2_test_key_off_fc(struct s6e3ha2 *ctx)
271{
272 s6e3ha2_dcs_write_seq_static(ctx, 0xfc, 0xa5, 0xa5);
273 return 0;
274}
275
276static int s6e3ha2_single_dsi_set(struct s6e3ha2 *ctx)
277{
278 s6e3ha2_dcs_write_seq_static(ctx, 0xf2, 0x67);
279 s6e3ha2_dcs_write_seq_static(ctx, 0xf9, 0x09);
280 return 0;
281}
282
283static int s6e3ha2_freq_calibration(struct s6e3ha2 *ctx)
284{
285 s6e3ha2_dcs_write_seq_static(ctx, 0xfd, 0x1c);
286 s6e3ha2_dcs_write_seq_static(ctx, 0xfe, 0x20, 0x39);
287 s6e3ha2_dcs_write_seq_static(ctx, 0xfe, 0xa0);
288 s6e3ha2_dcs_write_seq_static(ctx, 0xfe, 0x20);
289 s6e3ha2_dcs_write_seq_static(ctx, 0xce, 0x03, 0x3b, 0x12, 0x62, 0x40,
290 0x80, 0xc0, 0x28, 0x28, 0x28, 0x28, 0x39, 0xc5);
291 return 0;
292}
293
294static int s6e3ha2_aor_control(struct s6e3ha2 *ctx)
295{
296 s6e3ha2_dcs_write_seq_static(ctx, 0xb2, 0x03, 0x10);
297 return 0;
298}
299
300static int s6e3ha2_caps_elvss_set(struct s6e3ha2 *ctx)
301{
302 s6e3ha2_dcs_write_seq_static(ctx, 0xb6, 0x9c, 0x0a);
303 return 0;
304}
305
306static int s6e3ha2_acl_off(struct s6e3ha2 *ctx)
307{
308 s6e3ha2_dcs_write_seq_static(ctx, 0x55, 0x00);
309 return 0;
310}
311
312static int s6e3ha2_acl_off_opr(struct s6e3ha2 *ctx)
313{
314 s6e3ha2_dcs_write_seq_static(ctx, 0xb5, 0x40);
315 return 0;
316}
317
318static int s6e3ha2_test_global(struct s6e3ha2 *ctx)
319{
320 s6e3ha2_dcs_write_seq_static(ctx, 0xb0, 0x07);
321 return 0;
322}
323
324static int s6e3ha2_test(struct s6e3ha2 *ctx)
325{
326 s6e3ha2_dcs_write_seq_static(ctx, 0xb8, 0x19);
327 return 0;
328}
329
330static int s6e3ha2_touch_hsync_on1(struct s6e3ha2 *ctx)
331{
332 s6e3ha2_dcs_write_seq_static(ctx, 0xbd, 0x33, 0x11, 0x02,
333 0x16, 0x02, 0x16);
334 return 0;
335}
336
337static int s6e3ha2_pentile_control(struct s6e3ha2 *ctx)
338{
339 s6e3ha2_dcs_write_seq_static(ctx, 0xc0, 0x00, 0x00, 0xd8, 0xd8);
340 return 0;
341}
342
343static int s6e3ha2_poc_global(struct s6e3ha2 *ctx)
344{
345 s6e3ha2_dcs_write_seq_static(ctx, 0xb0, 0x20);
346 return 0;
347}
348
349static int s6e3ha2_poc_setting(struct s6e3ha2 *ctx)
350{
351 s6e3ha2_dcs_write_seq_static(ctx, 0xfe, 0x08);
352 return 0;
353}
354
355static int s6e3ha2_pcd_set_off(struct s6e3ha2 *ctx)
356{
357 s6e3ha2_dcs_write_seq_static(ctx, 0xcc, 0x40, 0x51);
358 return 0;
359}
360
361static int s6e3ha2_err_fg_set(struct s6e3ha2 *ctx)
362{
363 s6e3ha2_dcs_write_seq_static(ctx, 0xed, 0x44);
364 return 0;
365}
366
367static int s6e3ha2_hbm_off(struct s6e3ha2 *ctx)
368{
369 s6e3ha2_dcs_write_seq_static(ctx, 0x53, 0x00);
370 return 0;
371}
372
373static int s6e3ha2_te_start_setting(struct s6e3ha2 *ctx)
374{
375 s6e3ha2_dcs_write_seq_static(ctx, 0xb9, 0x10, 0x09, 0xff, 0x00, 0x09);
376 return 0;
377}
378
379static int s6e3ha2_gamma_update(struct s6e3ha2 *ctx)
380{
381 s6e3ha2_dcs_write_seq_static(ctx, 0xf7, 0x03);
382	ndelay(100); /* a 100ns delay is required here */
383 s6e3ha2_dcs_write_seq_static(ctx, 0xf7, 0x00);
384 return 0;
385}
386
387static int s6e3ha2_get_brightness(struct backlight_device *bl_dev)
388{
389 return bl_dev->props.brightness;
390}
391
392static int s6e3ha2_set_vint(struct s6e3ha2 *ctx)
393{
394 struct backlight_device *bl_dev = ctx->bl_dev;
395 unsigned int brightness = bl_dev->props.brightness;
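	/* build the 0xf4 (VINT) DCS payload; brightness selects a vint_table entry */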
396 unsigned char data[] = { 0xf4, 0x8b,
397 vint_table[brightness * (S6E3HA2_VINT_STATUS_MAX - 1) /
398 S6E3HA2_MAX_BRIGHTNESS] };
399
400 return s6e3ha2_dcs_write(ctx, data, ARRAY_SIZE(data));
401}
402
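/* Map brightness (0..S6E3HA2_MAX_BRIGHTNESS) onto a gamma_tbl row index. */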
403static unsigned int s6e3ha2_get_brightness_index(unsigned int brightness)
404{
405 return (brightness * (S6E3HA2_NUM_GAMMA_STEPS - 1)) /
406 S6E3HA2_MAX_BRIGHTNESS;
407}
408
409static int s6e3ha2_update_gamma(struct s6e3ha2 *ctx, unsigned int brightness)
410{
411 struct backlight_device *bl_dev = ctx->bl_dev;
412 unsigned int index = s6e3ha2_get_brightness_index(brightness);
413 u8 data[S6E3HA2_GAMMA_CMD_CNT + 1] = { 0xca, };
414 int ret;
415
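	/* data[0] is the gamma set command (0xca); the selected table row follows */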
416 memcpy(data + 1, gamma_tbl + index, S6E3HA2_GAMMA_CMD_CNT);
417 s6e3ha2_call_write_func(ret,
418 s6e3ha2_dcs_write(ctx, data, ARRAY_SIZE(data)));
419
420 s6e3ha2_call_write_func(ret, s6e3ha2_gamma_update(ctx));
421 bl_dev->props.brightness = brightness;
422
423 return 0;
424}
425
426static int s6e3ha2_set_brightness(struct backlight_device *bl_dev)
427{
428 struct s6e3ha2 *ctx = bl_get_data(bl_dev);
429 unsigned int brightness = bl_dev->props.brightness;
430 int ret;
431
432 if (brightness < S6E3HA2_MIN_BRIGHTNESS ||
433 brightness > bl_dev->props.max_brightness) {
434 dev_err(ctx->dev, "Invalid brightness: %u\n", brightness);
435 return -EINVAL;
436 }
437
438 if (bl_dev->props.power > FB_BLANK_NORMAL)
439 return -EPERM;
440
441 s6e3ha2_call_write_func(ret, s6e3ha2_test_key_on_f0(ctx));
442 s6e3ha2_call_write_func(ret, s6e3ha2_update_gamma(ctx, brightness));
443 s6e3ha2_call_write_func(ret, s6e3ha2_aor_control(ctx));
444 s6e3ha2_call_write_func(ret, s6e3ha2_set_vint(ctx));
445 s6e3ha2_call_write_func(ret, s6e3ha2_test_key_off_f0(ctx));
446
447 return 0;
448}
449
450static const struct backlight_ops s6e3ha2_bl_ops = {
451 .get_brightness = s6e3ha2_get_brightness,
452 .update_status = s6e3ha2_set_brightness,
453};
454
455static int s6e3ha2_panel_init(struct s6e3ha2 *ctx)
456{
457 struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
458 int ret;
459
460 s6e3ha2_call_write_func(ret, mipi_dsi_dcs_exit_sleep_mode(dsi));
461 usleep_range(5000, 6000);
462
463 s6e3ha2_call_write_func(ret, s6e3ha2_test_key_on_f0(ctx));
464 s6e3ha2_call_write_func(ret, s6e3ha2_single_dsi_set(ctx));
465 s6e3ha2_call_write_func(ret, s6e3ha2_test_key_on_fc(ctx));
466 s6e3ha2_call_write_func(ret, s6e3ha2_freq_calibration(ctx));
467 s6e3ha2_call_write_func(ret, s6e3ha2_test_key_off_fc(ctx));
468 s6e3ha2_call_write_func(ret, s6e3ha2_test_key_off_f0(ctx));
469
470 return 0;
471}
472
473static int s6e3ha2_power_off(struct s6e3ha2 *ctx)
474{
475 return regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
476}
477
478static int s6e3ha2_disable(struct drm_panel *panel)
479{
480 struct s6e3ha2 *ctx = container_of(panel, struct s6e3ha2, panel);
481 struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
482 int ret;
483
484 s6e3ha2_call_write_func(ret, mipi_dsi_dcs_enter_sleep_mode(dsi));
485 s6e3ha2_call_write_func(ret, mipi_dsi_dcs_set_display_off(dsi));
486
487 msleep(40);
488 ctx->bl_dev->props.power = FB_BLANK_NORMAL;
489
490 return 0;
491}
492
493static int s6e3ha2_unprepare(struct drm_panel *panel)
494{
495 struct s6e3ha2 *ctx = container_of(panel, struct s6e3ha2, panel);
496
497 return s6e3ha2_power_off(ctx);
498}
499
500static int s6e3ha2_power_on(struct s6e3ha2 *ctx)
501{
502 int ret;
503
504 ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
505 if (ret < 0)
506 return ret;
507
508 msleep(120);
509
510 gpiod_set_value(ctx->enable_gpio, 0);
511 usleep_range(5000, 6000);
512 gpiod_set_value(ctx->enable_gpio, 1);
513
514 gpiod_set_value(ctx->reset_gpio, 1);
515 usleep_range(5000, 6000);
516 gpiod_set_value(ctx->reset_gpio, 0);
517 usleep_range(5000, 6000);
518
519 return 0;
520}

521static int s6e3ha2_prepare(struct drm_panel *panel)
522{
523 struct s6e3ha2 *ctx = container_of(panel, struct s6e3ha2, panel);
524 int ret;
525
526 ret = s6e3ha2_power_on(ctx);
527 if (ret < 0)
528 return ret;
529
530 ret = s6e3ha2_panel_init(ctx);
531 if (ret < 0)
532 goto err;
533
534 ctx->bl_dev->props.power = FB_BLANK_NORMAL;
535
536 return 0;
537
538err:
539 s6e3ha2_power_off(ctx);
540 return ret;
541}
542
543static int s6e3ha2_enable(struct drm_panel *panel)
544{
545 struct s6e3ha2 *ctx = container_of(panel, struct s6e3ha2, panel);
546 struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
547 int ret;
548
549 /* common setting */
550 s6e3ha2_call_write_func(ret,
551 mipi_dsi_dcs_set_tear_on(dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK));
552
553 s6e3ha2_call_write_func(ret, s6e3ha2_test_key_on_f0(ctx));
554 s6e3ha2_call_write_func(ret, s6e3ha2_test_key_on_fc(ctx));
555 s6e3ha2_call_write_func(ret, s6e3ha2_touch_hsync_on1(ctx));
556 s6e3ha2_call_write_func(ret, s6e3ha2_pentile_control(ctx));
557 s6e3ha2_call_write_func(ret, s6e3ha2_poc_global(ctx));
558 s6e3ha2_call_write_func(ret, s6e3ha2_poc_setting(ctx));
559 s6e3ha2_call_write_func(ret, s6e3ha2_test_key_off_fc(ctx));
560
561 /* pcd setting off for TB */
562 s6e3ha2_call_write_func(ret, s6e3ha2_pcd_set_off(ctx));
563 s6e3ha2_call_write_func(ret, s6e3ha2_err_fg_set(ctx));
564 s6e3ha2_call_write_func(ret, s6e3ha2_te_start_setting(ctx));
565
566 /* brightness setting */
567 s6e3ha2_call_write_func(ret, s6e3ha2_set_brightness(ctx->bl_dev));
568 s6e3ha2_call_write_func(ret, s6e3ha2_aor_control(ctx));
569 s6e3ha2_call_write_func(ret, s6e3ha2_caps_elvss_set(ctx));
570 s6e3ha2_call_write_func(ret, s6e3ha2_gamma_update(ctx));
571 s6e3ha2_call_write_func(ret, s6e3ha2_acl_off(ctx));
572 s6e3ha2_call_write_func(ret, s6e3ha2_acl_off_opr(ctx));
573 s6e3ha2_call_write_func(ret, s6e3ha2_hbm_off(ctx));
574
575 /* elvss temp compensation */
576 s6e3ha2_call_write_func(ret, s6e3ha2_test_global(ctx));
577 s6e3ha2_call_write_func(ret, s6e3ha2_test(ctx));
578 s6e3ha2_call_write_func(ret, s6e3ha2_test_key_off_f0(ctx));
579
580 s6e3ha2_call_write_func(ret, mipi_dsi_dcs_set_display_on(dsi));
581 ctx->bl_dev->props.power = FB_BLANK_UNBLANK;
582
583 return 0;
584}
585
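/* Timings add up left to right: active, + front porch, + sync, + back porch. */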
586static const struct drm_display_mode default_mode = {
587 .clock = 222372,
588 .hdisplay = 1440,
589 .hsync_start = 1440 + 1,
590 .hsync_end = 1440 + 1 + 1,
591 .htotal = 1440 + 1 + 1 + 1,
592 .vdisplay = 2560,
593 .vsync_start = 2560 + 1,
594 .vsync_end = 2560 + 1 + 1,
595 .vtotal = 2560 + 1 + 1 + 15,
596 .vrefresh = 60,
597 .flags = 0,
598};
599
600static int s6e3ha2_get_modes(struct drm_panel *panel)
601{
602 struct drm_connector *connector = panel->connector;
603 struct drm_display_mode *mode;
604
605 mode = drm_mode_duplicate(panel->drm, &default_mode);
606 if (!mode) {
607		DRM_ERROR("failed to add mode %ux%u@%u\n",
608 default_mode.hdisplay, default_mode.vdisplay,
609 default_mode.vrefresh);
610 return -ENOMEM;
611 }
612
613 drm_mode_set_name(mode);
614
615 mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
616 drm_mode_probed_add(connector, mode);
617
618 connector->display_info.width_mm = 71;
619 connector->display_info.height_mm = 125;
620
621 return 1;
622}
623
624static const struct drm_panel_funcs s6e3ha2_drm_funcs = {
625 .disable = s6e3ha2_disable,
626 .unprepare = s6e3ha2_unprepare,
627 .prepare = s6e3ha2_prepare,
628 .enable = s6e3ha2_enable,
629 .get_modes = s6e3ha2_get_modes,
630};
631
632static int s6e3ha2_probe(struct mipi_dsi_device *dsi)
633{
634 struct device *dev = &dsi->dev;
635 struct s6e3ha2 *ctx;
636 int ret;
637
638 ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
639 if (!ctx)
640 return -ENOMEM;
641
642 mipi_dsi_set_drvdata(dsi, ctx);
643
644 ctx->dev = dev;
645
646 dsi->lanes = 4;
647 dsi->format = MIPI_DSI_FMT_RGB888;
648 dsi->mode_flags = MIPI_DSI_CLOCK_NON_CONTINUOUS;
649
650 ctx->supplies[0].supply = "vdd3";
651 ctx->supplies[1].supply = "vci";
652
653 ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ctx->supplies),
654 ctx->supplies);
655 if (ret < 0) {
656 dev_err(dev, "failed to get regulators: %d\n", ret);
657 return ret;
658 }
659
660 ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
661 if (IS_ERR(ctx->reset_gpio)) {
662 dev_err(dev, "cannot get reset-gpios %ld\n",
663 PTR_ERR(ctx->reset_gpio));
664 return PTR_ERR(ctx->reset_gpio);
665 }
666
667 ctx->enable_gpio = devm_gpiod_get(dev, "enable", GPIOD_OUT_HIGH);
668 if (IS_ERR(ctx->enable_gpio)) {
669 dev_err(dev, "cannot get enable-gpios %ld\n",
670 PTR_ERR(ctx->enable_gpio));
671 return PTR_ERR(ctx->enable_gpio);
672 }
673
674 ctx->bl_dev = backlight_device_register("s6e3ha2", dev, ctx,
675 &s6e3ha2_bl_ops, NULL);
676 if (IS_ERR(ctx->bl_dev)) {
677 dev_err(dev, "failed to register backlight device\n");
678 return PTR_ERR(ctx->bl_dev);
679 }
680
681 ctx->bl_dev->props.max_brightness = S6E3HA2_MAX_BRIGHTNESS;
682 ctx->bl_dev->props.brightness = S6E3HA2_DEFAULT_BRIGHTNESS;
683 ctx->bl_dev->props.power = FB_BLANK_POWERDOWN;
684
685 drm_panel_init(&ctx->panel);
686 ctx->panel.dev = dev;
687 ctx->panel.funcs = &s6e3ha2_drm_funcs;
688
689 ret = drm_panel_add(&ctx->panel);
690 if (ret < 0)
691 goto unregister_backlight;
692
693 ret = mipi_dsi_attach(dsi);
694 if (ret < 0)
695 goto remove_panel;
696
697 return ret;
698
699remove_panel:
700 drm_panel_remove(&ctx->panel);
701
702unregister_backlight:
703 backlight_device_unregister(ctx->bl_dev);
704
705 return ret;
706}
707
708static int s6e3ha2_remove(struct mipi_dsi_device *dsi)
709{
710 struct s6e3ha2 *ctx = mipi_dsi_get_drvdata(dsi);
711
712 mipi_dsi_detach(dsi);
713 drm_panel_remove(&ctx->panel);
714 backlight_device_unregister(ctx->bl_dev);
715
716 return 0;
717}
718
719static const struct of_device_id s6e3ha2_of_match[] = {
720 { .compatible = "samsung,s6e3ha2" },
721 { }
722};
723MODULE_DEVICE_TABLE(of, s6e3ha2_of_match);
724
725static struct mipi_dsi_driver s6e3ha2_driver = {
726 .probe = s6e3ha2_probe,
727 .remove = s6e3ha2_remove,
728 .driver = {
729 .name = "panel-samsung-s6e3ha2",
730 .of_match_table = s6e3ha2_of_match,
731 },
732};
733module_mipi_dsi_driver(s6e3ha2_driver);
734
735MODULE_AUTHOR("Donghwa Lee <dh09.lee@samsung.com>");
736MODULE_AUTHOR("Hyungwon Hwang <human.hwang@samsung.com>");
737MODULE_AUTHOR("Hoegeun Kwon <hoegeun.kwon@samsung.com>");
738MODULE_DESCRIPTION("MIPI-DSI based s6e3ha2 AMOLED Panel Driver");
739MODULE_LICENSE("GPL v2");
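The brightness handling above reduces to integer linear scaling: a value in [0, S6E3HA2_MAX_BRIGHTNESS] picks a row of gamma_tbl (or an entry of vint_table). A minimal user-space C sketch of the same arithmetic, with made-up MAX_BRIGHTNESS and NUM_STEPS values standing in for the driver's S6E3HA2_* constants:

#include <stdio.h>

/* Hypothetical stand-ins for the driver's S6E3HA2_* constants. */
#define MAX_BRIGHTNESS	100
#define NUM_STEPS	10

/*
 * Same scaling as s6e3ha2_get_brightness_index()/s6e3ha2_set_vint():
 * 0..MAX_BRIGHTNESS maps linearly onto table rows 0..NUM_STEPS-1.
 */
static unsigned int scale_to_index(unsigned int brightness)
{
	return brightness * (NUM_STEPS - 1) / MAX_BRIGHTNESS;
}

int main(void)
{
	unsigned int b;

	for (b = 0; b <= MAX_BRIGHTNESS; b += 25)
		printf("brightness %3u -> table row %u\n", b, scale_to_index(b));
	return 0;
}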
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 89eb0422821c..c4566ce8fda7 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -386,6 +386,31 @@ static void panel_simple_shutdown(struct device *dev)
 	panel_simple_disable(&panel->base);
 }
 
+static const struct drm_display_mode ampire_am_480272h3tmqw_t01h_mode = {
+	.clock = 9000,
+	.hdisplay = 480,
+	.hsync_start = 480 + 2,
+	.hsync_end = 480 + 2 + 41,
+	.htotal = 480 + 2 + 41 + 2,
+	.vdisplay = 272,
+	.vsync_start = 272 + 2,
+	.vsync_end = 272 + 2 + 10,
+	.vtotal = 272 + 2 + 10 + 2,
+	.vrefresh = 60,
+	.flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
+};
+
+static const struct panel_desc ampire_am_480272h3tmqw_t01h = {
+	.modes = &ampire_am_480272h3tmqw_t01h_mode,
+	.num_modes = 1,
+	.bpc = 8,
+	.size = {
+		.width = 105,
+		.height = 67,
+	},
+	.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+};
+
 static const struct drm_display_mode ampire_am800480r3tmqwa1h_mode = {
 	.clock = 33333,
 	.hdisplay = 800,
@@ -1806,8 +1831,36 @@ static const struct panel_desc urt_umsh_8596md_parallel = {
 	.bus_format = MEDIA_BUS_FMT_RGB666_1X18,
 };
 
+static const struct drm_display_mode winstar_wf35ltiacd_mode = {
+	.clock = 6410,
+	.hdisplay = 320,
+	.hsync_start = 320 + 20,
+	.hsync_end = 320 + 20 + 30,
+	.htotal = 320 + 20 + 30 + 38,
+	.vdisplay = 240,
+	.vsync_start = 240 + 4,
+	.vsync_end = 240 + 4 + 3,
+	.vtotal = 240 + 4 + 3 + 15,
+	.vrefresh = 60,
+	.flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
+};
+
+static const struct panel_desc winstar_wf35ltiacd = {
+	.modes = &winstar_wf35ltiacd_mode,
+	.num_modes = 1,
+	.bpc = 8,
+	.size = {
+		.width = 70,
+		.height = 53,
+	},
+	.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+};
+
 static const struct of_device_id platform_of_match[] = {
 	{
+		.compatible = "ampire,am-480272h3tmqw-t01h",
+		.data = &ampire_am_480272h3tmqw_t01h,
+	}, {
 		.compatible = "ampire,am800480r3tmqwa1h",
 		.data = &ampire_am800480r3tmqwa1h,
 	}, {
@@ -1994,6 +2047,9 @@ static const struct of_device_id platform_of_match[] = {
 		.compatible = "urt,umsh-8596md-20t",
 		.data = &urt_umsh_8596md_parallel,
 	}, {
+		.compatible = "winstar,wf35ltiacd",
+		.data = &winstar_wf35ltiacd,
+	}, {
 	/* sentinel */
 	}
 };
diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7789v.c b/drivers/gpu/drm/panel/panel-sitronix-st7789v.c
new file mode 100644
index 000000000000..358c64ef1922
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-sitronix-st7789v.c
@@ -0,0 +1,449 @@
1/*
2 * Copyright (C) 2017 Free Electrons
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License version
6 * 2 as published by the Free Software Foundation.
7 */
8
9#include <linux/gpio/consumer.h>
10#include <linux/regulator/consumer.h>
11#include <linux/spi/spi.h>
12
13#include <drm/drmP.h>
14#include <drm/drm_panel.h>
15
16#include <video/mipi_display.h>
17
18#define ST7789V_COLMOD_RGB_FMT_18BITS (6 << 4)
19#define ST7789V_COLMOD_CTRL_FMT_18BITS (6 << 0)
20
21#define ST7789V_RAMCTRL_CMD 0xb0
22#define ST7789V_RAMCTRL_RM_RGB BIT(4)
23#define ST7789V_RAMCTRL_DM_RGB BIT(0)
24#define ST7789V_RAMCTRL_MAGIC (3 << 6)
25#define ST7789V_RAMCTRL_EPF(n) (((n) & 3) << 4)
26
27#define ST7789V_RGBCTRL_CMD 0xb1
28#define ST7789V_RGBCTRL_WO BIT(7)
29#define ST7789V_RGBCTRL_RCM(n) (((n) & 3) << 5)
30#define ST7789V_RGBCTRL_VSYNC_HIGH BIT(3)
31#define ST7789V_RGBCTRL_HSYNC_HIGH BIT(2)
32#define ST7789V_RGBCTRL_PCLK_HIGH BIT(1)
33#define ST7789V_RGBCTRL_VBP(n) ((n) & 0x7f)
34#define ST7789V_RGBCTRL_HBP(n) ((n) & 0x1f)
35
36#define ST7789V_PORCTRL_CMD 0xb2
37#define ST7789V_PORCTRL_IDLE_BP(n) (((n) & 0xf) << 4)
38#define ST7789V_PORCTRL_IDLE_FP(n) ((n) & 0xf)
39#define ST7789V_PORCTRL_PARTIAL_BP(n) (((n) & 0xf) << 4)
40#define ST7789V_PORCTRL_PARTIAL_FP(n) ((n) & 0xf)
41
42#define ST7789V_GCTRL_CMD 0xb7
43#define ST7789V_GCTRL_VGHS(n) (((n) & 7) << 4)
44#define ST7789V_GCTRL_VGLS(n) ((n) & 7)
45
46#define ST7789V_VCOMS_CMD 0xbb
47
48#define ST7789V_LCMCTRL_CMD 0xc0
49#define ST7789V_LCMCTRL_XBGR BIT(5)
50#define ST7789V_LCMCTRL_XMX BIT(3)
51#define ST7789V_LCMCTRL_XMH BIT(2)
52
53#define ST7789V_VDVVRHEN_CMD 0xc2
54#define ST7789V_VDVVRHEN_CMDEN BIT(0)
55
56#define ST7789V_VRHS_CMD 0xc3
57
58#define ST7789V_VDVS_CMD 0xc4
59
60#define ST7789V_FRCTRL2_CMD 0xc6
61
62#define ST7789V_PWCTRL1_CMD 0xd0
63#define ST7789V_PWCTRL1_MAGIC 0xa4
64#define ST7789V_PWCTRL1_AVDD(n) (((n) & 3) << 6)
65#define ST7789V_PWCTRL1_AVCL(n) (((n) & 3) << 4)
66#define ST7789V_PWCTRL1_VDS(n) ((n) & 3)
67
68#define ST7789V_PVGAMCTRL_CMD 0xe0
69#define ST7789V_PVGAMCTRL_JP0(n) (((n) & 3) << 4)
70#define ST7789V_PVGAMCTRL_JP1(n) (((n) & 3) << 4)
71#define ST7789V_PVGAMCTRL_VP0(n) ((n) & 0xf)
72#define ST7789V_PVGAMCTRL_VP1(n) ((n) & 0x3f)
73#define ST7789V_PVGAMCTRL_VP2(n) ((n) & 0x3f)
74#define ST7789V_PVGAMCTRL_VP4(n) ((n) & 0x1f)
75#define ST7789V_PVGAMCTRL_VP6(n) ((n) & 0x1f)
76#define ST7789V_PVGAMCTRL_VP13(n) ((n) & 0xf)
77#define ST7789V_PVGAMCTRL_VP20(n) ((n) & 0x7f)
78#define ST7789V_PVGAMCTRL_VP27(n) ((n) & 7)
79#define ST7789V_PVGAMCTRL_VP36(n) (((n) & 7) << 4)
80#define ST7789V_PVGAMCTRL_VP43(n) ((n) & 0x7f)
81#define ST7789V_PVGAMCTRL_VP50(n) ((n) & 0xf)
82#define ST7789V_PVGAMCTRL_VP57(n) ((n) & 0x1f)
83#define ST7789V_PVGAMCTRL_VP59(n) ((n) & 0x1f)
84#define ST7789V_PVGAMCTRL_VP61(n) ((n) & 0x3f)
85#define ST7789V_PVGAMCTRL_VP62(n) ((n) & 0x3f)
86#define ST7789V_PVGAMCTRL_VP63(n) (((n) & 0xf) << 4)
87
88#define ST7789V_NVGAMCTRL_CMD 0xe1
89#define ST7789V_NVGAMCTRL_JN0(n) (((n) & 3) << 4)
90#define ST7789V_NVGAMCTRL_JN1(n) (((n) & 3) << 4)
91#define ST7789V_NVGAMCTRL_VN0(n) ((n) & 0xf)
92#define ST7789V_NVGAMCTRL_VN1(n) ((n) & 0x3f)
93#define ST7789V_NVGAMCTRL_VN2(n) ((n) & 0x3f)
94#define ST7789V_NVGAMCTRL_VN4(n) ((n) & 0x1f)
95#define ST7789V_NVGAMCTRL_VN6(n) ((n) & 0x1f)
96#define ST7789V_NVGAMCTRL_VN13(n) ((n) & 0xf)
97#define ST7789V_NVGAMCTRL_VN20(n) ((n) & 0x7f)
98#define ST7789V_NVGAMCTRL_VN27(n) ((n) & 7)
99#define ST7789V_NVGAMCTRL_VN36(n) (((n) & 7) << 4)
100#define ST7789V_NVGAMCTRL_VN43(n) ((n) & 0x7f)
101#define ST7789V_NVGAMCTRL_VN50(n) ((n) & 0xf)
102#define ST7789V_NVGAMCTRL_VN57(n) ((n) & 0x1f)
103#define ST7789V_NVGAMCTRL_VN59(n) ((n) & 0x1f)
104#define ST7789V_NVGAMCTRL_VN61(n) ((n) & 0x3f)
105#define ST7789V_NVGAMCTRL_VN62(n) ((n) & 0x3f)
106#define ST7789V_NVGAMCTRL_VN63(n) (((n) & 0xf) << 4)
107
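/* Evaluate func once; on a non-zero result, return it from the enclosing function. */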
108#define ST7789V_TEST(val, func) \
109 do { \
110 if ((val = (func))) \
111 return val; \
112 } while (0)
113
114struct st7789v {
115 struct drm_panel panel;
116 struct spi_device *spi;
117 struct gpio_desc *reset;
118 struct backlight_device *backlight;
119 struct regulator *power;
120};
121
122enum st7789v_prefix {
123 ST7789V_COMMAND = 0,
124 ST7789V_DATA = 1,
125};
126
127static inline struct st7789v *panel_to_st7789v(struct drm_panel *panel)
128{
129 return container_of(panel, struct st7789v, panel);
130}
131
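/* The controller expects 9-bit SPI words: data/command flag in bit 8, payload in bits 7:0. */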
132static int st7789v_spi_write(struct st7789v *ctx, enum st7789v_prefix prefix,
133 u8 data)
134{
135 struct spi_transfer xfer = { };
136 struct spi_message msg;
137 u16 txbuf = ((prefix & 1) << 8) | data;
138
139 spi_message_init(&msg);
140
141 xfer.tx_buf = &txbuf;
142 xfer.bits_per_word = 9;
143 xfer.len = sizeof(txbuf);
144
145 spi_message_add_tail(&xfer, &msg);
146 return spi_sync(ctx->spi, &msg);
147}
148
149static int st7789v_write_command(struct st7789v *ctx, u8 cmd)
150{
151 return st7789v_spi_write(ctx, ST7789V_COMMAND, cmd);
152}
153
154static int st7789v_write_data(struct st7789v *ctx, u8 cmd)
155{
156 return st7789v_spi_write(ctx, ST7789V_DATA, cmd);
157}
158
159static const struct drm_display_mode default_mode = {
160 .clock = 7000,
161 .hdisplay = 240,
162 .hsync_start = 240 + 38,
163 .hsync_end = 240 + 38 + 10,
164 .htotal = 240 + 38 + 10 + 10,
165 .vdisplay = 320,
166 .vsync_start = 320 + 8,
167 .vsync_end = 320 + 8 + 4,
168 .vtotal = 320 + 8 + 4 + 4,
169 .vrefresh = 60,
170};
171
172static int st7789v_get_modes(struct drm_panel *panel)
173{
174 struct drm_connector *connector = panel->connector;
175 struct drm_display_mode *mode;
176
177 mode = drm_mode_duplicate(panel->drm, &default_mode);
178 if (!mode) {
179		dev_err(panel->drm->dev, "failed to add mode %ux%u@%u\n",
180 default_mode.hdisplay, default_mode.vdisplay,
181 default_mode.vrefresh);
182 return -ENOMEM;
183 }
184
185 drm_mode_set_name(mode);
186
187 mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
188 drm_mode_probed_add(connector, mode);
189
190 panel->connector->display_info.width_mm = 61;
191 panel->connector->display_info.height_mm = 103;
192
193 return 1;
194}
195
196static int st7789v_prepare(struct drm_panel *panel)
197{
198 struct st7789v *ctx = panel_to_st7789v(panel);
199 int ret;
200
201 ret = regulator_enable(ctx->power);
202 if (ret)
203 return ret;
204
205 gpiod_set_value(ctx->reset, 1);
206 msleep(30);
207 gpiod_set_value(ctx->reset, 0);
208 msleep(120);
209
210 ST7789V_TEST(ret, st7789v_write_command(ctx, MIPI_DCS_EXIT_SLEEP_MODE));
211
212 /* We need to wait 120ms after a sleep out command */
213 msleep(120);
214
215 ST7789V_TEST(ret, st7789v_write_command(ctx,
216 MIPI_DCS_SET_ADDRESS_MODE));
217 ST7789V_TEST(ret, st7789v_write_data(ctx, 0));
218
219 ST7789V_TEST(ret, st7789v_write_command(ctx,
220 MIPI_DCS_SET_PIXEL_FORMAT));
221 ST7789V_TEST(ret, st7789v_write_data(ctx,
222 (MIPI_DCS_PIXEL_FMT_18BIT << 4) |
223 (MIPI_DCS_PIXEL_FMT_18BIT)));
224
225 ST7789V_TEST(ret, st7789v_write_command(ctx, ST7789V_PORCTRL_CMD));
226 ST7789V_TEST(ret, st7789v_write_data(ctx, 0xc));
227 ST7789V_TEST(ret, st7789v_write_data(ctx, 0xc));
228 ST7789V_TEST(ret, st7789v_write_data(ctx, 0));
229 ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_PORCTRL_IDLE_BP(3) |
230 ST7789V_PORCTRL_IDLE_FP(3)));
231 ST7789V_TEST(ret, st7789v_write_data(ctx,
232 ST7789V_PORCTRL_PARTIAL_BP(3) |
233 ST7789V_PORCTRL_PARTIAL_FP(3)));
234
235 ST7789V_TEST(ret, st7789v_write_command(ctx, ST7789V_GCTRL_CMD));
236 ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_GCTRL_VGLS(5) |
237 ST7789V_GCTRL_VGHS(3)));
238
239 ST7789V_TEST(ret, st7789v_write_command(ctx, ST7789V_VCOMS_CMD));
240 ST7789V_TEST(ret, st7789v_write_data(ctx, 0x2b));
241
242 ST7789V_TEST(ret, st7789v_write_command(ctx, ST7789V_LCMCTRL_CMD));
243 ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_LCMCTRL_XMH |
244 ST7789V_LCMCTRL_XMX |
245 ST7789V_LCMCTRL_XBGR));
246
247 ST7789V_TEST(ret, st7789v_write_command(ctx, ST7789V_VDVVRHEN_CMD));
248 ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_VDVVRHEN_CMDEN));
249
250 ST7789V_TEST(ret, st7789v_write_command(ctx, ST7789V_VRHS_CMD));
251 ST7789V_TEST(ret, st7789v_write_data(ctx, 0xf));
252
253 ST7789V_TEST(ret, st7789v_write_command(ctx, ST7789V_VDVS_CMD));
254 ST7789V_TEST(ret, st7789v_write_data(ctx, 0x20));
255
256 ST7789V_TEST(ret, st7789v_write_command(ctx, ST7789V_FRCTRL2_CMD));
257 ST7789V_TEST(ret, st7789v_write_data(ctx, 0xf));
258
259 ST7789V_TEST(ret, st7789v_write_command(ctx, ST7789V_PWCTRL1_CMD));
260 ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_PWCTRL1_MAGIC));
261 ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_PWCTRL1_AVDD(2) |
262 ST7789V_PWCTRL1_AVCL(2) |
263 ST7789V_PWCTRL1_VDS(1)));
264
265 ST7789V_TEST(ret, st7789v_write_command(ctx, ST7789V_PVGAMCTRL_CMD));
266 ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_PVGAMCTRL_VP63(0xd)));
267 ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_PVGAMCTRL_VP1(0xca)));
268 ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_PVGAMCTRL_VP2(0xe)));
269 ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_PVGAMCTRL_VP4(8)));
270 ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_PVGAMCTRL_VP6(9)));
271 ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_PVGAMCTRL_VP13(7)));
272 ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_PVGAMCTRL_VP20(0x2d)));
273 ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_PVGAMCTRL_VP27(0xb) |
274 ST7789V_PVGAMCTRL_VP36(3)));
275 ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_PVGAMCTRL_VP43(0x3d)));
276 ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_PVGAMCTRL_JP1(3) |
277 ST7789V_PVGAMCTRL_VP50(4)));
278 ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_PVGAMCTRL_VP57(0xa)));
279 ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_PVGAMCTRL_VP59(0xa)));
280 ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_PVGAMCTRL_VP61(0x1b)));
281 ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_PVGAMCTRL_VP62(0x28)));
282
283 ST7789V_TEST(ret, st7789v_write_command(ctx, ST7789V_NVGAMCTRL_CMD));
284 ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_NVGAMCTRL_VN63(0xd)));
285 ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_NVGAMCTRL_VN1(0xca)));
286 ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_NVGAMCTRL_VN2(0xf)));
287 ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_NVGAMCTRL_VN4(8)));
288 ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_NVGAMCTRL_VN6(8)));
289 ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_NVGAMCTRL_VN13(7)));
290 ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_NVGAMCTRL_VN20(0x2e)));
291 ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_NVGAMCTRL_VN27(0xc) |
292 ST7789V_NVGAMCTRL_VN36(5)));
293 ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_NVGAMCTRL_VN43(0x40)));
294 ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_NVGAMCTRL_JN1(3) |
295 ST7789V_NVGAMCTRL_VN50(4)));
296 ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_NVGAMCTRL_VN57(9)));
297 ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_NVGAMCTRL_VN59(0xb)));
298 ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_NVGAMCTRL_VN61(0x1b)));
299 ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_NVGAMCTRL_VN62(0x28)));
300
301 ST7789V_TEST(ret, st7789v_write_command(ctx, MIPI_DCS_ENTER_INVERT_MODE));
302
303 ST7789V_TEST(ret, st7789v_write_command(ctx, ST7789V_RAMCTRL_CMD));
304 ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_RAMCTRL_DM_RGB |
305 ST7789V_RAMCTRL_RM_RGB));
306 ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_RAMCTRL_EPF(3) |
307 ST7789V_RAMCTRL_MAGIC));
308
309 ST7789V_TEST(ret, st7789v_write_command(ctx, ST7789V_RGBCTRL_CMD));
310 ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_RGBCTRL_WO |
311 ST7789V_RGBCTRL_RCM(2) |
312 ST7789V_RGBCTRL_VSYNC_HIGH |
313 ST7789V_RGBCTRL_HSYNC_HIGH |
314 ST7789V_RGBCTRL_PCLK_HIGH));
315 ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_RGBCTRL_VBP(8)));
316 ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_RGBCTRL_HBP(20)));
317
318 return 0;
319}
320
321static int st7789v_enable(struct drm_panel *panel)
322{
323 struct st7789v *ctx = panel_to_st7789v(panel);
324
325 if (ctx->backlight) {
326 ctx->backlight->props.state &= ~BL_CORE_FBBLANK;
327 ctx->backlight->props.power = FB_BLANK_UNBLANK;
328 backlight_update_status(ctx->backlight);
329 }
330
331 return st7789v_write_command(ctx, MIPI_DCS_SET_DISPLAY_ON);
332}
333
334static int st7789v_disable(struct drm_panel *panel)
335{
336 struct st7789v *ctx = panel_to_st7789v(panel);
337 int ret;
338
339 ST7789V_TEST(ret, st7789v_write_command(ctx, MIPI_DCS_SET_DISPLAY_OFF));
340
341 if (ctx->backlight) {
342 ctx->backlight->props.power = FB_BLANK_POWERDOWN;
343 ctx->backlight->props.state |= BL_CORE_FBBLANK;
344 backlight_update_status(ctx->backlight);
345 }
346
347 return 0;
348}
349
350static int st7789v_unprepare(struct drm_panel *panel)
351{
352 struct st7789v *ctx = panel_to_st7789v(panel);
353 int ret;
354
355 ST7789V_TEST(ret, st7789v_write_command(ctx, MIPI_DCS_ENTER_SLEEP_MODE));
356
357 regulator_disable(ctx->power);
358
359 return 0;
360}
361
362static const struct drm_panel_funcs st7789v_drm_funcs = {
363 .disable = st7789v_disable,
364 .enable = st7789v_enable,
365 .get_modes = st7789v_get_modes,
366 .prepare = st7789v_prepare,
367 .unprepare = st7789v_unprepare,
368};
369
370static int st7789v_probe(struct spi_device *spi)
371{
372 struct device_node *backlight;
373 struct st7789v *ctx;
374 int ret;
375
376 ctx = devm_kzalloc(&spi->dev, sizeof(*ctx), GFP_KERNEL);
377 if (!ctx)
378 return -ENOMEM;
379
380 spi_set_drvdata(spi, ctx);
381 ctx->spi = spi;
382
383 ctx->panel.dev = &spi->dev;
384 ctx->panel.funcs = &st7789v_drm_funcs;
385
386 ctx->power = devm_regulator_get(&spi->dev, "power");
387 if (IS_ERR(ctx->power))
388 return PTR_ERR(ctx->power);
389
390 ctx->reset = devm_gpiod_get(&spi->dev, "reset", GPIOD_OUT_LOW);
391 if (IS_ERR(ctx->reset)) {
392 dev_err(&spi->dev, "Couldn't get our reset line\n");
393 return PTR_ERR(ctx->reset);
394 }
395
396 backlight = of_parse_phandle(spi->dev.of_node, "backlight", 0);
397 if (backlight) {
398 ctx->backlight = of_find_backlight_by_node(backlight);
399 of_node_put(backlight);
400
401 if (!ctx->backlight)
402 return -EPROBE_DEFER;
403 }
404
405 ret = drm_panel_add(&ctx->panel);
406 if (ret < 0)
407 goto err_free_backlight;
408
409 return 0;
410
411err_free_backlight:
412 if (ctx->backlight)
413 put_device(&ctx->backlight->dev);
414
415 return ret;
416}
417
418static int st7789v_remove(struct spi_device *spi)
419{
420 struct st7789v *ctx = spi_get_drvdata(spi);
421
422 drm_panel_detach(&ctx->panel);
423 drm_panel_remove(&ctx->panel);
424
425 if (ctx->backlight)
426 put_device(&ctx->backlight->dev);
427
428 return 0;
429}
430
431static const struct of_device_id st7789v_of_match[] = {
432 { .compatible = "sitronix,st7789v" },
433 { }
434};
435MODULE_DEVICE_TABLE(of, st7789v_of_match);
436
437static struct spi_driver st7789v_driver = {
438 .probe = st7789v_probe,
439 .remove = st7789v_remove,
440 .driver = {
441 .name = "st7789v",
442 .of_match_table = st7789v_of_match,
443 },
444};
445module_spi_driver(st7789v_driver);
446
447MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
448MODULE_DESCRIPTION("Sitronix st7789v LCD Driver");
449MODULE_LICENSE("GPL v2");
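The SPI framing used by st7789v_spi_write() above is easy to sanity-check on its own: each 9-bit word is the data/command flag in bit 8 plus the payload byte. A small stand-alone C sketch of just that packing (no SPI involved; the 0x3a/0x66 values mirror the MIPI_DCS_SET_PIXEL_FORMAT write in st7789v_prepare()):

#include <stdio.h>
#include <stdint.h>

enum prefix { PREFIX_COMMAND = 0, PREFIX_DATA = 1 };

/* Same packing as st7789v_spi_write(): flag in bit 8, byte in bits 7:0. */
static uint16_t pack_word(enum prefix prefix, uint8_t byte)
{
	return (uint16_t)((prefix & 1) << 8) | byte;
}

int main(void)
{
	/* 0x3a = MIPI_DCS_SET_PIXEL_FORMAT, 0x66 = 18bpp pixel format payload */
	printf("command word: 0x%03x\n", pack_word(PREFIX_COMMAND, 0x3a));
	printf("data word:    0x%03x\n", pack_word(PREFIX_DATA, 0x66));
	return 0;
}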
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
index 7d1cab57c89e..0fdedee4509d 100644
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -393,6 +393,7 @@ static struct ttm_bo_driver qxl_bo_driver = {
 	.verify_access = &qxl_verify_access,
 	.io_mem_reserve = &qxl_ttm_io_mem_reserve,
 	.io_mem_free = &qxl_ttm_io_mem_free,
+	.io_mem_pfn = ttm_bo_default_io_mem_pfn,
 	.move_notify = &qxl_bo_move_notify,
 };
 
398 399
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index aefca0b03f38..c31e660e35db 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -3295,7 +3295,7 @@ void r100_bandwidth_update(struct radeon_device *rdev)
 		mem_trp  = ((temp >> 8) & 0x7) + 1;
 		mem_tras = ((temp >> 11) & 0xf) + 4;
 	} else if (rdev->family == CHIP_RV350 ||
-		   rdev->family <= CHIP_RV380) {
+		   rdev->family == CHIP_RV380) {
 		/* rv3x0 */
 		mem_trcd = (temp & 0x7) + 3;
 		mem_trp = ((temp >> 8) & 0x7) + 3;
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 2e400dc414e3..c1c8e2208a21 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -499,6 +499,7 @@ struct radeon_bo {
 	u32 tiling_flags;
 	u32 pitch;
 	int surface_reg;
+	unsigned prime_shared_count;
 	/* list of all virtual address to which this bo
 	 * is associated to
 	 */
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index a8442f7196d6..df6b58c08544 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -164,6 +164,16 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
 			p->relocs[i].allowed_domains = domain;
 		}
 
+		/* Objects shared as dma-bufs cannot be moved to VRAM */
+		if (p->relocs[i].robj->prime_shared_count) {
+			p->relocs[i].allowed_domains &= ~RADEON_GEM_DOMAIN_VRAM;
+			if (!p->relocs[i].allowed_domains) {
+				DRM_ERROR("BO associated with dma-buf cannot "
+					  "be moved to VRAM\n");
+				return -EINVAL;
+			}
+		}
+
 		p->relocs[i].tv.bo = &p->relocs[i].robj->tbo;
 		p->relocs[i].tv.shared = !r->write_domain;
 
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 146297a702ab..17d3dafc8319 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -232,7 +232,8 @@ void radeon_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
 }
 
 static int radeon_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
-				 u16 *blue, uint32_t size)
+				 u16 *blue, uint32_t size,
+				 struct drm_modeset_acquire_ctx *ctx)
 {
 	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
 	int i;
@@ -1354,6 +1355,12 @@ radeon_user_framebuffer_create(struct drm_device *dev,
 		return ERR_PTR(-ENOENT);
 	}
 
+	/* Handle is imported dma-buf, so cannot be migrated to VRAM for scanout */
+	if (obj->import_attach) {
+		DRM_DEBUG_KMS("Cannot create framebuffer from imported dma_buf\n");
+		return ERR_PTR(-EINVAL);
+	}
+
 	radeon_fb = kzalloc(sizeof(*radeon_fb), GFP_KERNEL);
 	if (radeon_fb == NULL) {
 		drm_gem_object_unreference_unlocked(obj);
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index 9b0b123ce079..dddb372de2b9 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -120,6 +120,10 @@ static int radeon_gem_set_domain(struct drm_gem_object *gobj,
 			return r;
 		}
 	}
+	if (domain == RADEON_GEM_DOMAIN_VRAM && robj->prime_shared_count) {
+		/* A BO that is associated with a dma-buf cannot be sensibly migrated to VRAM */
+		return -EINVAL;
+	}
 	return 0;
 }
 
125 129
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 74b276060c20..bec2ec056de4 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -352,6 +352,11 @@ int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
 
 		return 0;
 	}
+	if (bo->prime_shared_count && domain == RADEON_GEM_DOMAIN_VRAM) {
+		/* A BO shared as a dma-buf cannot be sensibly migrated to VRAM */
+		return -EINVAL;
+	}
+
 	radeon_ttm_placement_from_domain(bo, domain);
 	for (i = 0; i < bo->placement.num_placement; i++) {
 		/* force to pin into visible video ram */
diff --git a/drivers/gpu/drm/radeon/radeon_prime.c b/drivers/gpu/drm/radeon/radeon_prime.c
index f3609c97496b..7110d403322c 100644
--- a/drivers/gpu/drm/radeon/radeon_prime.c
+++ b/drivers/gpu/drm/radeon/radeon_prime.c
@@ -77,6 +77,7 @@ struct drm_gem_object *radeon_gem_prime_import_sg_table(struct drm_device *dev,
 	list_add_tail(&bo->list, &rdev->gem.objects);
 	mutex_unlock(&rdev->gem.mutex);
 
+	bo->prime_shared_count = 1;
 	return &bo->gem_base;
 }
 
@@ -91,6 +92,9 @@ int radeon_gem_prime_pin(struct drm_gem_object *obj)
 
 	/* pin buffer into GTT */
 	ret = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_GTT, NULL);
+	if (likely(ret == 0))
+		bo->prime_shared_count++;
+
 	radeon_bo_unreserve(bo);
 	return ret;
 }
@@ -105,6 +109,8 @@ void radeon_gem_prime_unpin(struct drm_gem_object *obj)
 		return;
 
 	radeon_bo_unpin(bo);
+	if (bo->prime_shared_count)
+		bo->prime_shared_count--;
 	radeon_bo_unreserve(bo);
 }
 
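The radeon changes above all revolve around one invariant: while prime_shared_count is non-zero (the BO is exported or imported via dma-buf), the buffer must not be placed in VRAM. A toy C model of that gating, not the kernel API, just the bookkeeping:

#include <assert.h>
#include <stdio.h>

/* Toy stand-ins for the radeon placement domains and BO state. */
#define DOMAIN_GTT	1
#define DOMAIN_VRAM	2

struct toy_bo {
	unsigned prime_shared_count;
};

/* Mirrors the checks added in radeon_bo_pin_restricted()/radeon_gem_set_domain(). */
static int toy_pin(struct toy_bo *bo, int domain)
{
	if (bo->prime_shared_count && domain == DOMAIN_VRAM)
		return -1; /* shared via dma-buf: VRAM migration refused */
	return 0;
}

int main(void)
{
	struct toy_bo bo = { .prime_shared_count = 0 };

	assert(toy_pin(&bo, DOMAIN_VRAM) == 0); /* unshared: VRAM is fine */
	bo.prime_shared_count++;                /* as in radeon_gem_prime_pin() */
	assert(toy_pin(&bo, DOMAIN_VRAM) < 0);  /* shared: VRAM refused */
	assert(toy_pin(&bo, DOMAIN_GTT) == 0);  /* GTT still allowed */
	printf("dma-buf VRAM gating behaves as expected\n");
	return 0;
}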
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 684f1703aa5c..8b7623b5a624 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -213,8 +213,8 @@ static void radeon_evict_flags(struct ttm_buffer_object *bo,
 	rbo->placement.num_busy_placement = 0;
 	for (i = 0; i < rbo->placement.num_placement; i++) {
 		if (rbo->placements[i].flags & TTM_PL_FLAG_VRAM) {
-			if (rbo->placements[0].fpfn < fpfn)
-				rbo->placements[0].fpfn = fpfn;
+			if (rbo->placements[i].fpfn < fpfn)
+				rbo->placements[i].fpfn = fpfn;
 		} else {
 			rbo->placement.busy_placement =
 				&rbo->placements[i];
@@ -873,6 +873,7 @@ static struct ttm_bo_driver radeon_bo_driver = {
 	.fault_reserve_notify = &radeon_bo_fault_reserve_notify,
 	.io_mem_reserve = &radeon_ttm_io_mem_reserve,
 	.io_mem_free = &radeon_ttm_io_mem_free,
+	.io_mem_pfn = ttm_bo_default_io_mem_pfn,
 };
 
 int radeon_ttm_init(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
index 91ebe5c2c7a0..d8fa7a9c9240 100644
--- a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
+++ b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
@@ -417,7 +417,8 @@ static void rockchip_dp_unbind(struct device *dev, struct device *master,
 
 	rockchip_drm_psr_unregister(&dp->encoder);
 
-	return analogix_dp_unbind(dev, master, data);
+	analogix_dp_unbind(dev, master, data);
+	clk_disable_unprepare(dp->pclk);
 }
 
 static const struct component_ops rockchip_dp_component_ops = {
@@ -428,31 +429,13 @@ static const struct component_ops rockchip_dp_component_ops = {
 static int rockchip_dp_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
-	struct device_node *panel_node, *port, *endpoint;
 	struct drm_panel *panel = NULL;
 	struct rockchip_dp_device *dp;
+	int ret;
 
-	port = of_graph_get_port_by_id(dev->of_node, 1);
-	if (port) {
-		endpoint = of_get_child_by_name(port, "endpoint");
-		of_node_put(port);
-		if (!endpoint) {
-			dev_err(dev, "no output endpoint found\n");
-			return -EINVAL;
-		}
-
-		panel_node = of_graph_get_remote_port_parent(endpoint);
-		of_node_put(endpoint);
-		if (!panel_node) {
-			dev_err(dev, "no output node found\n");
-			return -EINVAL;
-		}
-
-		panel = of_drm_find_panel(panel_node);
-		of_node_put(panel_node);
-		if (!panel)
-			return -EPROBE_DEFER;
-	}
+	ret = drm_of_find_panel_or_bridge(dev->of_node, 1, 0, &panel, NULL);
+	if (ret)
+		return ret;
 
 	dp = devm_kzalloc(dev, sizeof(*dp), GFP_KERNEL);
 	if (!dp)
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c
index 4e55d63c3ef3..a2169dd3d26b 100644
--- a/drivers/gpu/drm/rockchip/cdn-dp-core.c
+++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c
@@ -1053,6 +1053,7 @@ static int cdn_dp_bind(struct device *dev, struct device *master, void *data)
 	dp->connected = false;
 	dp->active = false;
 	dp->active_port = -1;
+	dp->fw_loaded = false;
 
 	INIT_WORK(&dp->event_work, cdn_dp_pd_event_work);
 
@@ -1091,8 +1092,6 @@ static int cdn_dp_bind(struct device *dev, struct device *master, void *data)
 		goto err_free_connector;
 	}
 
-	cdn_dp_audio_codec_init(dp, dev);
-
 	for (i = 0; i < dp->ports; i++) {
 		port = dp->port[i];
 
@@ -1127,13 +1126,13 @@ static void cdn_dp_unbind(struct device *dev, struct device *master, void *data)
 	struct drm_connector *connector = &dp->connector;
 
 	cancel_work_sync(&dp->event_work);
-	platform_device_unregister(dp->audio_pdev);
 	cdn_dp_encoder_disable(encoder);
 	encoder->funcs->destroy(encoder);
 	connector->funcs->destroy(connector);
 
 	pm_runtime_disable(dev);
-	release_firmware(dp->fw);
+	if (dp->fw_loaded)
+		release_firmware(dp->fw);
 	kfree(dp->edid);
 	dp->edid = NULL;
 }
@@ -1219,6 +1218,8 @@ static int cdn_dp_probe(struct platform_device *pdev)
 	mutex_init(&dp->lock);
 	dev_set_drvdata(dev, dp);
 
+	cdn_dp_audio_codec_init(dp, dev);
+
 	return component_add(dev, &cdn_dp_component_ops);
 }
 
@@ -1226,6 +1227,7 @@ static int cdn_dp_remove(struct platform_device *pdev)
 {
 	struct cdn_dp_device *dp = platform_get_drvdata(pdev);
 
+	platform_device_unregister(dp->audio_pdev);
 	cdn_dp_suspend(dp->dev);
 	component_del(&pdev->dev, &cdn_dp_component_ops);
 
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
index cd7d02e1f758..c6b1b7f3a2a3 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
@@ -136,21 +136,24 @@ static int rockchip_drm_bind(struct device *dev)
 	INIT_LIST_HEAD(&private->psr_list);
 	spin_lock_init(&private->psr_list_lock);
 
+	ret = rockchip_drm_init_iommu(drm_dev);
+	if (ret)
+		goto err_free;
+
 	drm_mode_config_init(drm_dev);
 
 	rockchip_drm_mode_config_init(drm_dev);
 
-	ret = rockchip_drm_init_iommu(drm_dev);
-	if (ret)
-		goto err_config_cleanup;
-
 	/* Try to bind all sub drivers. */
 	ret = component_bind_all(dev, drm_dev);
 	if (ret)
-		goto err_iommu_cleanup;
+		goto err_mode_config_cleanup;
 
-	/* init kms poll for handling hpd */
-	drm_kms_helper_poll_init(drm_dev);
+	ret = drm_vblank_init(drm_dev, drm_dev->mode_config.num_crtc);
+	if (ret)
+		goto err_unbind_all;
+
+	drm_mode_config_reset(drm_dev);
 
 	/*
 	 * enable drm irq mode.
@@ -158,15 +161,12 @@ static int rockchip_drm_bind(struct device *dev)
 	 */
 	drm_dev->irq_enabled = true;
 
-	ret = drm_vblank_init(drm_dev, ROCKCHIP_MAX_CRTC);
-	if (ret)
-		goto err_kms_helper_poll_fini;
-
-	drm_mode_config_reset(drm_dev);
+	/* init kms poll for handling hpd */
+	drm_kms_helper_poll_init(drm_dev);
 
 	ret = rockchip_drm_fbdev_init(drm_dev);
 	if (ret)
-		goto err_vblank_cleanup;
+		goto err_kms_helper_poll_fini;
 
 	ret = drm_dev_register(drm_dev, 0);
 	if (ret)
@@ -175,17 +175,17 @@ static int rockchip_drm_bind(struct device *dev)
 	return 0;
 err_fbdev_fini:
 	rockchip_drm_fbdev_fini(drm_dev);
-err_vblank_cleanup:
-	drm_vblank_cleanup(drm_dev);
 err_kms_helper_poll_fini:
 	drm_kms_helper_poll_fini(drm_dev);
+	drm_vblank_cleanup(drm_dev);
+err_unbind_all:
 	component_unbind_all(dev, drm_dev);
-err_iommu_cleanup:
-	rockchip_iommu_cleanup(drm_dev);
-err_config_cleanup:
+err_mode_config_cleanup:
 	drm_mode_config_cleanup(drm_dev);
-	drm_dev->dev_private = NULL;
+	rockchip_iommu_cleanup(drm_dev);
 err_free:
+	drm_dev->dev_private = NULL;
+	dev_set_drvdata(dev, NULL);
 	drm_dev_unref(drm_dev);
 	return ret;
 }
@@ -194,16 +194,20 @@ static void rockchip_drm_unbind(struct device *dev)
 {
 	struct drm_device *drm_dev = dev_get_drvdata(dev);
 
+	drm_dev_unregister(drm_dev);
+
 	rockchip_drm_fbdev_fini(drm_dev);
-	drm_vblank_cleanup(drm_dev);
 	drm_kms_helper_poll_fini(drm_dev);
+
+	drm_atomic_helper_shutdown(drm_dev);
+	drm_vblank_cleanup(drm_dev);
 	component_unbind_all(dev, drm_dev);
-	rockchip_iommu_cleanup(drm_dev);
 	drm_mode_config_cleanup(drm_dev);
+	rockchip_iommu_cleanup(drm_dev);
+
 	drm_dev->dev_private = NULL;
-	drm_dev_unregister(drm_dev);
-	drm_dev_unref(drm_dev);
 	dev_set_drvdata(dev, NULL);
+	drm_dev_unref(drm_dev);
 }
 
 static void rockchip_drm_lastclose(struct drm_device *dev)
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index 2151e1cee4b4..3f7a82d1e095 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -506,7 +506,7 @@ static int vop_enable(struct drm_crtc *crtc)
506 ret = pm_runtime_get_sync(vop->dev); 506 ret = pm_runtime_get_sync(vop->dev);
507 if (ret < 0) { 507 if (ret < 0) {
508 dev_err(vop->dev, "failed to get pm runtime: %d\n", ret); 508 dev_err(vop->dev, "failed to get pm runtime: %d\n", ret);
509 goto err_put_pm_runtime; 509 return ret;
510 } 510 }
511 511
512 ret = clk_enable(vop->hclk); 512 ret = clk_enable(vop->hclk);
@@ -1405,10 +1405,16 @@ static int vop_initial(struct vop *vop)
1405 return PTR_ERR(vop->dclk); 1405 return PTR_ERR(vop->dclk);
1406 } 1406 }
1407 1407
1408 ret = pm_runtime_get_sync(vop->dev);
1409 if (ret < 0) {
1410 dev_err(vop->dev, "failed to get pm runtime: %d\n", ret);
1411 return ret;
1412 }
1413
1408 ret = clk_prepare(vop->dclk); 1414 ret = clk_prepare(vop->dclk);
1409 if (ret < 0) { 1415 if (ret < 0) {
1410 dev_err(vop->dev, "failed to prepare dclk\n"); 1416 dev_err(vop->dev, "failed to prepare dclk\n");
1411 return ret; 1417 goto err_put_pm_runtime;
1412 } 1418 }
1413 1419
1414 /* Enable both the hclk and aclk to setup the vop */ 1420 /* Enable both the hclk and aclk to setup the vop */
@@ -1468,6 +1474,8 @@ static int vop_initial(struct vop *vop)
1468 1474
1469 vop->is_enabled = false; 1475 vop->is_enabled = false;
1470 1476
1477 pm_runtime_put_sync(vop->dev);
1478
1471 return 0; 1479 return 0;
1472 1480
1473err_disable_aclk: 1481err_disable_aclk:
@@ -1476,6 +1484,8 @@ err_disable_hclk:
1476 clk_disable_unprepare(vop->hclk); 1484 clk_disable_unprepare(vop->hclk);
1477err_unprepare_dclk: 1485err_unprepare_dclk:
1478 clk_unprepare(vop->dclk); 1486 clk_unprepare(vop->dclk);
1487err_put_pm_runtime:
1488 pm_runtime_put_sync(vop->dev);
1479 return ret; 1489 return ret;
1480} 1490}
1481 1491
@@ -1576,12 +1586,6 @@ static int vop_bind(struct device *dev, struct device *master, void *data)
1576 if (!vop->regsbak) 1586 if (!vop->regsbak)
1577 return -ENOMEM; 1587 return -ENOMEM;
1578 1588
1579 ret = vop_initial(vop);
1580 if (ret < 0) {
1581 dev_err(&pdev->dev, "cannot initial vop dev - err %d\n", ret);
1582 return ret;
1583 }
1584
1585 irq = platform_get_irq(pdev, 0); 1589 irq = platform_get_irq(pdev, 0);
1586 if (irq < 0) { 1590 if (irq < 0) {
1587 dev_err(dev, "cannot find irq for vop\n"); 1591 dev_err(dev, "cannot find irq for vop\n");
@@ -1608,8 +1612,17 @@ static int vop_bind(struct device *dev, struct device *master, void *data)
1608 1612
1609 pm_runtime_enable(&pdev->dev); 1613 pm_runtime_enable(&pdev->dev);
1610 1614
1615 ret = vop_initial(vop);
1616 if (ret < 0) {
 1617 dev_err(&pdev->dev, "cannot initialize vop dev - err %d\n", ret);
1618 goto err_disable_pm_runtime;
1619 }
1620
1611 return 0; 1621 return 0;
1612 1622
1623err_disable_pm_runtime:
1624 pm_runtime_disable(&pdev->dev);
1625 vop_destroy_crtc(vop);
1613err_enable_irq: 1626err_enable_irq:
1614 enable_irq(vop->irq); /* To balance out the disable_irq above */ 1627 enable_irq(vop->irq); /* To balance out the disable_irq above */
1615 return ret; 1628 return ret;
@@ -1621,6 +1634,10 @@ static void vop_unbind(struct device *dev, struct device *master, void *data)
1621 1634
1622 pm_runtime_disable(dev); 1635 pm_runtime_disable(dev);
1623 vop_destroy_crtc(vop); 1636 vop_destroy_crtc(vop);
1637
1638 clk_unprepare(vop->aclk);
1639 clk_unprepare(vop->hclk);
1640 clk_unprepare(vop->dclk);
1624} 1641}
1625 1642
1626const struct component_ops vop_component_ops = { 1643const struct component_ops vop_component_ops = {
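
Note: vop_initial() now brackets the whole hardware setup in a runtime-PM
reference, and the goto ladder guarantees that reference is dropped on every
path. Condensed from the hunks above:

    ret = pm_runtime_get_sync(vop->dev);
    if (ret < 0)
            return ret;

    ret = clk_prepare(vop->dclk);
    if (ret < 0)
            goto err_put_pm_runtime;

    /* ... clock enables and register setup ... */

    pm_runtime_put_sync(vop->dev);            /* balance the get on success */
    return 0;

    err_put_pm_runtime:
            pm_runtime_put_sync(vop->dev);    /* and on every error path */
            return ret;
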
diff --git a/drivers/gpu/drm/sti/sti_gdp.c b/drivers/gpu/drm/sti/sti_gdp.c
index 86279f5022c2..88f16cdf6a4b 100644
--- a/drivers/gpu/drm/sti/sti_gdp.c
+++ b/drivers/gpu/drm/sti/sti_gdp.c
@@ -66,7 +66,9 @@ static struct gdp_format_to_str {
66#define GAM_GDP_ALPHARANGE_255 BIT(5) 66#define GAM_GDP_ALPHARANGE_255 BIT(5)
67#define GAM_GDP_AGC_FULL_RANGE 0x00808080 67#define GAM_GDP_AGC_FULL_RANGE 0x00808080
68#define GAM_GDP_PPT_IGNORE (BIT(1) | BIT(0)) 68#define GAM_GDP_PPT_IGNORE (BIT(1) | BIT(0))
69#define GAM_GDP_SIZE_MAX 0x7FF 69
70#define GAM_GDP_SIZE_MAX_WIDTH 3840
71#define GAM_GDP_SIZE_MAX_HEIGHT 2160
70 72
71#define GDP_NODE_NB_BANK 2 73#define GDP_NODE_NB_BANK 2
72#define GDP_NODE_PER_FIELD 2 74#define GDP_NODE_PER_FIELD 2
@@ -632,8 +634,8 @@ static int sti_gdp_atomic_check(struct drm_plane *drm_plane,
632 /* src_x are in 16.16 format */ 634 /* src_x are in 16.16 format */
633 src_x = state->src_x >> 16; 635 src_x = state->src_x >> 16;
634 src_y = state->src_y >> 16; 636 src_y = state->src_y >> 16;
635 src_w = clamp_val(state->src_w >> 16, 0, GAM_GDP_SIZE_MAX); 637 src_w = clamp_val(state->src_w >> 16, 0, GAM_GDP_SIZE_MAX_WIDTH);
636 src_h = clamp_val(state->src_h >> 16, 0, GAM_GDP_SIZE_MAX); 638 src_h = clamp_val(state->src_h >> 16, 0, GAM_GDP_SIZE_MAX_HEIGHT);
637 639
638 format = sti_gdp_fourcc2format(fb->format->format); 640 format = sti_gdp_fourcc2format(fb->format->format);
639 if (format == -1) { 641 if (format == -1) {
@@ -741,8 +743,8 @@ static void sti_gdp_atomic_update(struct drm_plane *drm_plane,
741 /* src_x are in 16.16 format */ 743 /* src_x are in 16.16 format */
742 src_x = state->src_x >> 16; 744 src_x = state->src_x >> 16;
743 src_y = state->src_y >> 16; 745 src_y = state->src_y >> 16;
744 src_w = clamp_val(state->src_w >> 16, 0, GAM_GDP_SIZE_MAX); 746 src_w = clamp_val(state->src_w >> 16, 0, GAM_GDP_SIZE_MAX_WIDTH);
745 src_h = clamp_val(state->src_h >> 16, 0, GAM_GDP_SIZE_MAX); 747 src_h = clamp_val(state->src_h >> 16, 0, GAM_GDP_SIZE_MAX_HEIGHT);
746 748
747 list = sti_gdp_get_free_nodes(gdp); 749 list = sti_gdp_get_free_nodes(gdp);
748 top_field = list->top_field; 750 top_field = list->top_field;
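
Note: atomic plane source coordinates are 16.16 fixed point, so the right shift
above recovers whole pixels before the new per-axis clamp; e.g. a src_w of
0x07800000 is 1920 << 16, i.e. 1920 pixels, well under the 3840 width limit:

    src_w = clamp_val(state->src_w >> 16, 0, GAM_GDP_SIZE_MAX_WIDTH);   /* <= 3840 */
    src_h = clamp_val(state->src_h >> 16, 0, GAM_GDP_SIZE_MAX_HEIGHT);  /* <= 2160 */
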
diff --git a/drivers/gpu/drm/sun4i/Makefile b/drivers/gpu/drm/sun4i/Makefile
index d625a82a6e5f..59b757350a1f 100644
--- a/drivers/gpu/drm/sun4i/Makefile
+++ b/drivers/gpu/drm/sun4i/Makefile
@@ -1,11 +1,11 @@
1sun4i-drm-y += sun4i_crtc.o
2sun4i-drm-y += sun4i_drv.o 1sun4i-drm-y += sun4i_drv.o
3sun4i-drm-y += sun4i_framebuffer.o 2sun4i-drm-y += sun4i_framebuffer.o
4sun4i-drm-y += sun4i_layer.o
5 3
6sun4i-tcon-y += sun4i_tcon.o 4sun4i-tcon-y += sun4i_tcon.o
7sun4i-tcon-y += sun4i_rgb.o 5sun4i-tcon-y += sun4i_rgb.o
8sun4i-tcon-y += sun4i_dotclock.o 6sun4i-tcon-y += sun4i_dotclock.o
7sun4i-tcon-y += sun4i_crtc.o
8sun4i-tcon-y += sun4i_layer.o
9 9
10obj-$(CONFIG_DRM_SUN4I) += sun4i-drm.o sun4i-tcon.o 10obj-$(CONFIG_DRM_SUN4I) += sun4i-drm.o sun4i-tcon.o
11obj-$(CONFIG_DRM_SUN4I) += sun4i_backend.o 11obj-$(CONFIG_DRM_SUN4I) += sun4i_backend.o
diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.c b/drivers/gpu/drm/sun4i/sun4i_backend.c
index 08ce15070f80..d660741ba475 100644
--- a/drivers/gpu/drm/sun4i/sun4i_backend.c
+++ b/drivers/gpu/drm/sun4i/sun4i_backend.c
@@ -24,7 +24,7 @@
24#include "sun4i_backend.h" 24#include "sun4i_backend.h"
25#include "sun4i_drv.h" 25#include "sun4i_drv.h"
26 26
27static u32 sunxi_rgb2yuv_coef[12] = { 27static const u32 sunxi_rgb2yuv_coef[12] = {
28 0x00000107, 0x00000204, 0x00000064, 0x00000108, 28 0x00000107, 0x00000204, 0x00000064, 0x00000108,
29 0x00003f69, 0x00003ed6, 0x000001c1, 0x00000808, 29 0x00003f69, 0x00003ed6, 0x000001c1, 0x00000808,
30 0x000001c1, 0x00003e88, 0x00003fb8, 0x00000808 30 0x000001c1, 0x00003e88, 0x00003fb8, 0x00000808
diff --git a/drivers/gpu/drm/sun4i/sun4i_crtc.c b/drivers/gpu/drm/sun4i/sun4i_crtc.c
index a5d546a68e16..3c876c3a356a 100644
--- a/drivers/gpu/drm/sun4i/sun4i_crtc.c
+++ b/drivers/gpu/drm/sun4i/sun4i_crtc.c
@@ -19,6 +19,7 @@
19#include <linux/clk-provider.h> 19#include <linux/clk-provider.h>
20#include <linux/ioport.h> 20#include <linux/ioport.h>
21#include <linux/of_address.h> 21#include <linux/of_address.h>
22#include <linux/of_graph.h>
22#include <linux/of_irq.h> 23#include <linux/of_irq.h>
23#include <linux/regmap.h> 24#include <linux/regmap.h>
24 25
@@ -27,6 +28,7 @@
27#include "sun4i_backend.h" 28#include "sun4i_backend.h"
28#include "sun4i_crtc.h" 29#include "sun4i_crtc.h"
29#include "sun4i_drv.h" 30#include "sun4i_drv.h"
31#include "sun4i_layer.h"
30#include "sun4i_tcon.h" 32#include "sun4i_tcon.h"
31 33
32static void sun4i_crtc_atomic_begin(struct drm_crtc *crtc, 34static void sun4i_crtc_atomic_begin(struct drm_crtc *crtc,
@@ -50,12 +52,11 @@ static void sun4i_crtc_atomic_flush(struct drm_crtc *crtc,
50 struct drm_crtc_state *old_state) 52 struct drm_crtc_state *old_state)
51{ 53{
52 struct sun4i_crtc *scrtc = drm_crtc_to_sun4i_crtc(crtc); 54 struct sun4i_crtc *scrtc = drm_crtc_to_sun4i_crtc(crtc);
53 struct sun4i_drv *drv = scrtc->drv;
54 struct drm_pending_vblank_event *event = crtc->state->event; 55 struct drm_pending_vblank_event *event = crtc->state->event;
55 56
56 DRM_DEBUG_DRIVER("Committing plane changes\n"); 57 DRM_DEBUG_DRIVER("Committing plane changes\n");
57 58
58 sun4i_backend_commit(drv->backend); 59 sun4i_backend_commit(scrtc->backend);
59 60
60 if (event) { 61 if (event) {
61 crtc->state->event = NULL; 62 crtc->state->event = NULL;
@@ -72,11 +73,10 @@ static void sun4i_crtc_atomic_flush(struct drm_crtc *crtc,
72static void sun4i_crtc_disable(struct drm_crtc *crtc) 73static void sun4i_crtc_disable(struct drm_crtc *crtc)
73{ 74{
74 struct sun4i_crtc *scrtc = drm_crtc_to_sun4i_crtc(crtc); 75 struct sun4i_crtc *scrtc = drm_crtc_to_sun4i_crtc(crtc);
75 struct sun4i_drv *drv = scrtc->drv;
76 76
77 DRM_DEBUG_DRIVER("Disabling the CRTC\n"); 77 DRM_DEBUG_DRIVER("Disabling the CRTC\n");
78 78
79 sun4i_tcon_disable(drv->tcon); 79 sun4i_tcon_disable(scrtc->tcon);
80 80
81 if (crtc->state->event && !crtc->state->active) { 81 if (crtc->state->event && !crtc->state->active) {
82 spin_lock_irq(&crtc->dev->event_lock); 82 spin_lock_irq(&crtc->dev->event_lock);
@@ -90,11 +90,10 @@ static void sun4i_crtc_disable(struct drm_crtc *crtc)
90static void sun4i_crtc_enable(struct drm_crtc *crtc) 90static void sun4i_crtc_enable(struct drm_crtc *crtc)
91{ 91{
92 struct sun4i_crtc *scrtc = drm_crtc_to_sun4i_crtc(crtc); 92 struct sun4i_crtc *scrtc = drm_crtc_to_sun4i_crtc(crtc);
93 struct sun4i_drv *drv = scrtc->drv;
94 93
95 DRM_DEBUG_DRIVER("Enabling the CRTC\n"); 94 DRM_DEBUG_DRIVER("Enabling the CRTC\n");
96 95
97 sun4i_tcon_enable(drv->tcon); 96 sun4i_tcon_enable(scrtc->tcon);
98} 97}
99 98
100static const struct drm_crtc_helper_funcs sun4i_crtc_helper_funcs = { 99static const struct drm_crtc_helper_funcs sun4i_crtc_helper_funcs = {
@@ -107,11 +106,10 @@ static const struct drm_crtc_helper_funcs sun4i_crtc_helper_funcs = {
107static int sun4i_crtc_enable_vblank(struct drm_crtc *crtc) 106static int sun4i_crtc_enable_vblank(struct drm_crtc *crtc)
108{ 107{
109 struct sun4i_crtc *scrtc = drm_crtc_to_sun4i_crtc(crtc); 108 struct sun4i_crtc *scrtc = drm_crtc_to_sun4i_crtc(crtc);
110 struct sun4i_drv *drv = scrtc->drv;
111 109
112 DRM_DEBUG_DRIVER("Enabling VBLANK on crtc %p\n", crtc); 110 DRM_DEBUG_DRIVER("Enabling VBLANK on crtc %p\n", crtc);
113 111
114 sun4i_tcon_enable_vblank(drv->tcon, true); 112 sun4i_tcon_enable_vblank(scrtc->tcon, true);
115 113
116 return 0; 114 return 0;
117} 115}
@@ -119,11 +117,10 @@ static int sun4i_crtc_enable_vblank(struct drm_crtc *crtc)
119static void sun4i_crtc_disable_vblank(struct drm_crtc *crtc) 117static void sun4i_crtc_disable_vblank(struct drm_crtc *crtc)
120{ 118{
121 struct sun4i_crtc *scrtc = drm_crtc_to_sun4i_crtc(crtc); 119 struct sun4i_crtc *scrtc = drm_crtc_to_sun4i_crtc(crtc);
122 struct sun4i_drv *drv = scrtc->drv;
123 120
124 DRM_DEBUG_DRIVER("Disabling VBLANK on crtc %p\n", crtc); 121 DRM_DEBUG_DRIVER("Disabling VBLANK on crtc %p\n", crtc);
125 122
126 sun4i_tcon_enable_vblank(drv->tcon, false); 123 sun4i_tcon_enable_vblank(scrtc->tcon, false);
127} 124}
128 125
129static const struct drm_crtc_funcs sun4i_crtc_funcs = { 126static const struct drm_crtc_funcs sun4i_crtc_funcs = {
@@ -137,28 +134,67 @@ static const struct drm_crtc_funcs sun4i_crtc_funcs = {
137 .disable_vblank = sun4i_crtc_disable_vblank, 134 .disable_vblank = sun4i_crtc_disable_vblank,
138}; 135};
139 136
140struct sun4i_crtc *sun4i_crtc_init(struct drm_device *drm) 137struct sun4i_crtc *sun4i_crtc_init(struct drm_device *drm,
138 struct sun4i_backend *backend,
139 struct sun4i_tcon *tcon)
141{ 140{
142 struct sun4i_drv *drv = drm->dev_private;
143 struct sun4i_crtc *scrtc; 141 struct sun4i_crtc *scrtc;
144 int ret; 142 struct drm_plane *primary = NULL, *cursor = NULL;
143 int ret, i;
145 144
146 scrtc = devm_kzalloc(drm->dev, sizeof(*scrtc), GFP_KERNEL); 145 scrtc = devm_kzalloc(drm->dev, sizeof(*scrtc), GFP_KERNEL);
147 if (!scrtc) 146 if (!scrtc)
147 return ERR_PTR(-ENOMEM);
148 scrtc->backend = backend;
149 scrtc->tcon = tcon;
150
151 /* Create our layers */
152 scrtc->layers = sun4i_layers_init(drm, scrtc->backend);
153 if (IS_ERR(scrtc->layers)) {
154 dev_err(drm->dev, "Couldn't create the planes\n");
148 return NULL; 155 return NULL;
149 scrtc->drv = drv; 156 }
157
158 /* find primary and cursor planes for drm_crtc_init_with_planes */
159 for (i = 0; scrtc->layers[i]; i++) {
160 struct sun4i_layer *layer = scrtc->layers[i];
161
162 switch (layer->plane.type) {
163 case DRM_PLANE_TYPE_PRIMARY:
164 primary = &layer->plane;
165 break;
166 case DRM_PLANE_TYPE_CURSOR:
167 cursor = &layer->plane;
168 break;
169 default:
170 break;
171 }
172 }
150 173
151 ret = drm_crtc_init_with_planes(drm, &scrtc->crtc, 174 ret = drm_crtc_init_with_planes(drm, &scrtc->crtc,
152 drv->primary, 175 primary,
153 NULL, 176 cursor,
154 &sun4i_crtc_funcs, 177 &sun4i_crtc_funcs,
155 NULL); 178 NULL);
156 if (ret) { 179 if (ret) {
157 dev_err(drm->dev, "Couldn't init DRM CRTC\n"); 180 dev_err(drm->dev, "Couldn't init DRM CRTC\n");
158 return NULL; 181 return ERR_PTR(ret);
159 } 182 }
160 183
161 drm_crtc_helper_add(&scrtc->crtc, &sun4i_crtc_helper_funcs); 184 drm_crtc_helper_add(&scrtc->crtc, &sun4i_crtc_helper_funcs);
162 185
186 /* Set crtc.port to output port node of the tcon */
187 scrtc->crtc.port = of_graph_get_port_by_id(scrtc->tcon->dev->of_node,
188 1);
189
190 /* Set possible_crtcs to this crtc for overlay planes */
191 for (i = 0; scrtc->layers[i]; i++) {
192 uint32_t possible_crtcs = BIT(drm_crtc_index(&scrtc->crtc));
193 struct sun4i_layer *layer = scrtc->layers[i];
194
195 if (layer->plane.type == DRM_PLANE_TYPE_OVERLAY)
196 layer->plane.possible_crtcs = possible_crtcs;
197 }
198
163 return scrtc; 199 return scrtc;
164} 200}
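
Note: possible_crtcs is a bitmask keyed by CRTC registration order, so the
overlay planes above can only be pointed at this CRTC after
drm_crtc_init_with_planes() has run and drm_crtc_index() is valid. The idiom,
as used in the hunk above:

    uint32_t mask = BIT(drm_crtc_index(&scrtc->crtc));

    for (i = 0; scrtc->layers[i]; i++)
            if (scrtc->layers[i]->plane.type == DRM_PLANE_TYPE_OVERLAY)
                    scrtc->layers[i]->plane.possible_crtcs = mask;
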
diff --git a/drivers/gpu/drm/sun4i/sun4i_crtc.h b/drivers/gpu/drm/sun4i/sun4i_crtc.h
index dec8ce4d9b25..230cb8f0d601 100644
--- a/drivers/gpu/drm/sun4i/sun4i_crtc.h
+++ b/drivers/gpu/drm/sun4i/sun4i_crtc.h
@@ -17,7 +17,9 @@ struct sun4i_crtc {
17 struct drm_crtc crtc; 17 struct drm_crtc crtc;
18 struct drm_pending_vblank_event *event; 18 struct drm_pending_vblank_event *event;
19 19
20 struct sun4i_drv *drv; 20 struct sun4i_backend *backend;
21 struct sun4i_tcon *tcon;
22 struct sun4i_layer **layers;
21}; 23};
22 24
23static inline struct sun4i_crtc *drm_crtc_to_sun4i_crtc(struct drm_crtc *crtc) 25static inline struct sun4i_crtc *drm_crtc_to_sun4i_crtc(struct drm_crtc *crtc)
@@ -25,6 +27,8 @@ static inline struct sun4i_crtc *drm_crtc_to_sun4i_crtc(struct drm_crtc *crtc)
25 return container_of(crtc, struct sun4i_crtc, crtc); 27 return container_of(crtc, struct sun4i_crtc, crtc);
26} 28}
27 29
28struct sun4i_crtc *sun4i_crtc_init(struct drm_device *drm); 30struct sun4i_crtc *sun4i_crtc_init(struct drm_device *drm,
31 struct sun4i_backend *backend,
32 struct sun4i_tcon *tcon);
29 33
30#endif /* _SUN4I_CRTC_H_ */ 34#endif /* _SUN4I_CRTC_H_ */
diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c
index 329ea56106a5..8ddd72cd5873 100644
--- a/drivers/gpu/drm/sun4i/sun4i_drv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_drv.c
@@ -12,6 +12,7 @@
12 12
13#include <linux/component.h> 13#include <linux/component.h>
14#include <linux/of_graph.h> 14#include <linux/of_graph.h>
15#include <linux/of_reserved_mem.h>
15 16
16#include <drm/drmP.h> 17#include <drm/drmP.h>
17#include <drm/drm_crtc_helper.h> 18#include <drm/drm_crtc_helper.h>
@@ -20,10 +21,9 @@
20#include <drm/drm_fb_helper.h> 21#include <drm/drm_fb_helper.h>
21#include <drm/drm_of.h> 22#include <drm/drm_of.h>
22 23
23#include "sun4i_crtc.h"
24#include "sun4i_drv.h" 24#include "sun4i_drv.h"
25#include "sun4i_framebuffer.h" 25#include "sun4i_framebuffer.h"
26#include "sun4i_layer.h" 26#include "sun4i_tcon.h"
27 27
28DEFINE_DRM_GEM_CMA_FOPS(sun4i_drv_fops); 28DEFINE_DRM_GEM_CMA_FOPS(sun4i_drv_fops);
29 29
@@ -92,30 +92,25 @@ static int sun4i_drv_bind(struct device *dev)
92 } 92 }
93 drm->dev_private = drv; 93 drm->dev_private = drv;
94 94
95 drm_vblank_init(drm, 1); 95 ret = of_reserved_mem_device_init(dev);
96 if (ret && ret != -ENODEV) {
97 dev_err(drm->dev, "Couldn't claim our memory region\n");
98 goto free_drm;
99 }
100
101 /* drm_vblank_init calls kcalloc, which can fail */
102 ret = drm_vblank_init(drm, 1);
103 if (ret)
104 goto free_mem_region;
105
96 drm_mode_config_init(drm); 106 drm_mode_config_init(drm);
97 107
98 ret = component_bind_all(drm->dev, drm); 108 ret = component_bind_all(drm->dev, drm);
99 if (ret) { 109 if (ret) {
100 dev_err(drm->dev, "Couldn't bind all pipelines components\n"); 110 dev_err(drm->dev, "Couldn't bind all pipelines components\n");
101 goto free_drm; 111 goto cleanup_mode_config;
102 }
103
104 /* Create our layers */
105 drv->layers = sun4i_layers_init(drm);
106 if (IS_ERR(drv->layers)) {
107 dev_err(drm->dev, "Couldn't create the planes\n");
108 ret = PTR_ERR(drv->layers);
109 goto free_drm;
110 } 112 }
111 113
112 /* Create our CRTC */
113 drv->crtc = sun4i_crtc_init(drm);
114 if (!drv->crtc) {
115 dev_err(drm->dev, "Couldn't create the CRTC\n");
116 ret = -EINVAL;
117 goto free_drm;
118 }
119 drm->irq_enabled = true; 114 drm->irq_enabled = true;
120 115
121 /* Remove early framebuffers (ie. simplefb) */ 116 /* Remove early framebuffers (ie. simplefb) */
@@ -126,7 +121,7 @@ static int sun4i_drv_bind(struct device *dev)
126 if (IS_ERR(drv->fbdev)) { 121 if (IS_ERR(drv->fbdev)) {
127 dev_err(drm->dev, "Couldn't create our framebuffer\n"); 122 dev_err(drm->dev, "Couldn't create our framebuffer\n");
128 ret = PTR_ERR(drv->fbdev); 123 ret = PTR_ERR(drv->fbdev);
129 goto free_drm; 124 goto cleanup_mode_config;
130 } 125 }
131 126
132 /* Enable connectors polling */ 127 /* Enable connectors polling */
@@ -134,10 +129,18 @@ static int sun4i_drv_bind(struct device *dev)
134 129
135 ret = drm_dev_register(drm, 0); 130 ret = drm_dev_register(drm, 0);
136 if (ret) 131 if (ret)
137 goto free_drm; 132 goto finish_poll;
138 133
139 return 0; 134 return 0;
140 135
136finish_poll:
137 drm_kms_helper_poll_fini(drm);
138 sun4i_framebuffer_free(drm);
139cleanup_mode_config:
140 drm_mode_config_cleanup(drm);
141 drm_vblank_cleanup(drm);
142free_mem_region:
143 of_reserved_mem_device_release(dev);
141free_drm: 144free_drm:
142 drm_dev_unref(drm); 145 drm_dev_unref(drm);
143 return ret; 146 return ret;
@@ -150,7 +153,9 @@ static void sun4i_drv_unbind(struct device *dev)
150 drm_dev_unregister(drm); 153 drm_dev_unregister(drm);
151 drm_kms_helper_poll_fini(drm); 154 drm_kms_helper_poll_fini(drm);
152 sun4i_framebuffer_free(drm); 155 sun4i_framebuffer_free(drm);
156 drm_mode_config_cleanup(drm);
153 drm_vblank_cleanup(drm); 157 drm_vblank_cleanup(drm);
158 of_reserved_mem_device_release(dev);
154 drm_dev_unref(drm); 159 drm_dev_unref(drm);
155} 160}
156 161
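
Note: the reworked bind path uses the kernel's goto-ladder idiom: each acquired
resource gets a label, and a failure jumps to the label that unwinds everything
acquired so far, in reverse order. The structure introduced above, condensed:

    ret = of_reserved_mem_device_init(dev);
    if (ret && ret != -ENODEV)
            goto free_drm;                    /* -ENODEV just means no region */

    ret = drm_vblank_init(drm, 1);
    if (ret)
            goto free_mem_region;

    /* ... component binding, fbdev, registration ... */
    return 0;

    free_mem_region:
            of_reserved_mem_device_release(dev);
    free_drm:
            drm_dev_unref(drm);
            return ret;
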
diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.h b/drivers/gpu/drm/sun4i/sun4i_drv.h
index 597353eab728..5df50126ff52 100644
--- a/drivers/gpu/drm/sun4i/sun4i_drv.h
+++ b/drivers/gpu/drm/sun4i/sun4i_drv.h
@@ -18,13 +18,9 @@
18 18
19struct sun4i_drv { 19struct sun4i_drv {
20 struct sun4i_backend *backend; 20 struct sun4i_backend *backend;
21 struct sun4i_crtc *crtc;
22 struct sun4i_tcon *tcon; 21 struct sun4i_tcon *tcon;
23 22
24 struct drm_plane *primary;
25 struct drm_fbdev_cma *fbdev; 23 struct drm_fbdev_cma *fbdev;
26
27 struct sun4i_layer **layers;
28}; 24};
29 25
30#endif /* _SUN4I_DRV_H_ */ 26#endif /* _SUN4I_DRV_H_ */
diff --git a/drivers/gpu/drm/sun4i/sun4i_framebuffer.c b/drivers/gpu/drm/sun4i/sun4i_framebuffer.c
index 2c3beff8b53e..9872e0fc03b0 100644
--- a/drivers/gpu/drm/sun4i/sun4i_framebuffer.c
+++ b/drivers/gpu/drm/sun4i/sun4i_framebuffer.c
@@ -48,5 +48,4 @@ void sun4i_framebuffer_free(struct drm_device *drm)
48 struct sun4i_drv *drv = drm->dev_private; 48 struct sun4i_drv *drv = drm->dev_private;
49 49
50 drm_fbdev_cma_fini(drv->fbdev); 50 drm_fbdev_cma_fini(drv->fbdev);
51 drm_mode_config_cleanup(drm);
52} 51}
diff --git a/drivers/gpu/drm/sun4i/sun4i_layer.c b/drivers/gpu/drm/sun4i/sun4i_layer.c
index 5d53c977bca5..f26bde5b9117 100644
--- a/drivers/gpu/drm/sun4i/sun4i_layer.c
+++ b/drivers/gpu/drm/sun4i/sun4i_layer.c
@@ -16,7 +16,6 @@
16#include <drm/drmP.h> 16#include <drm/drmP.h>
17 17
18#include "sun4i_backend.h" 18#include "sun4i_backend.h"
19#include "sun4i_drv.h"
20#include "sun4i_layer.h" 19#include "sun4i_layer.h"
21 20
22struct sun4i_plane_desc { 21struct sun4i_plane_desc {
@@ -36,8 +35,7 @@ static void sun4i_backend_layer_atomic_disable(struct drm_plane *plane,
36 struct drm_plane_state *old_state) 35 struct drm_plane_state *old_state)
37{ 36{
38 struct sun4i_layer *layer = plane_to_sun4i_layer(plane); 37 struct sun4i_layer *layer = plane_to_sun4i_layer(plane);
39 struct sun4i_drv *drv = layer->drv; 38 struct sun4i_backend *backend = layer->backend;
40 struct sun4i_backend *backend = drv->backend;
41 39
42 sun4i_backend_layer_enable(backend, layer->id, false); 40 sun4i_backend_layer_enable(backend, layer->id, false);
43} 41}
@@ -46,8 +44,7 @@ static void sun4i_backend_layer_atomic_update(struct drm_plane *plane,
46 struct drm_plane_state *old_state) 44 struct drm_plane_state *old_state)
47{ 45{
48 struct sun4i_layer *layer = plane_to_sun4i_layer(plane); 46 struct sun4i_layer *layer = plane_to_sun4i_layer(plane);
49 struct sun4i_drv *drv = layer->drv; 47 struct sun4i_backend *backend = layer->backend;
50 struct sun4i_backend *backend = drv->backend;
51 48
52 sun4i_backend_update_layer_coord(backend, layer->id, plane); 49 sun4i_backend_update_layer_coord(backend, layer->id, plane);
53 sun4i_backend_update_layer_formats(backend, layer->id, plane); 50 sun4i_backend_update_layer_formats(backend, layer->id, plane);
@@ -104,9 +101,9 @@ static const struct sun4i_plane_desc sun4i_backend_planes[] = {
104}; 101};
105 102
106static struct sun4i_layer *sun4i_layer_init_one(struct drm_device *drm, 103static struct sun4i_layer *sun4i_layer_init_one(struct drm_device *drm,
104 struct sun4i_backend *backend,
107 const struct sun4i_plane_desc *plane) 105 const struct sun4i_plane_desc *plane)
108{ 106{
109 struct sun4i_drv *drv = drm->dev_private;
110 struct sun4i_layer *layer; 107 struct sun4i_layer *layer;
111 int ret; 108 int ret;
112 109
@@ -114,7 +111,8 @@ static struct sun4i_layer *sun4i_layer_init_one(struct drm_device *drm,
114 if (!layer) 111 if (!layer)
115 return ERR_PTR(-ENOMEM); 112 return ERR_PTR(-ENOMEM);
116 113
117 ret = drm_universal_plane_init(drm, &layer->plane, BIT(0), 114 /* possible crtcs are set later */
115 ret = drm_universal_plane_init(drm, &layer->plane, 0,
118 &sun4i_backend_layer_funcs, 116 &sun4i_backend_layer_funcs,
119 plane->formats, plane->nformats, 117 plane->formats, plane->nformats,
120 plane->type, NULL); 118 plane->type, NULL);
@@ -125,22 +123,19 @@ static struct sun4i_layer *sun4i_layer_init_one(struct drm_device *drm,
125 123
126 drm_plane_helper_add(&layer->plane, 124 drm_plane_helper_add(&layer->plane,
127 &sun4i_backend_layer_helper_funcs); 125 &sun4i_backend_layer_helper_funcs);
128 layer->drv = drv; 126 layer->backend = backend;
129
130 if (plane->type == DRM_PLANE_TYPE_PRIMARY)
131 drv->primary = &layer->plane;
132 127
133 return layer; 128 return layer;
134} 129}
135 130
136struct sun4i_layer **sun4i_layers_init(struct drm_device *drm) 131struct sun4i_layer **sun4i_layers_init(struct drm_device *drm,
132 struct sun4i_backend *backend)
137{ 133{
138 struct sun4i_drv *drv = drm->dev_private;
139 struct sun4i_layer **layers; 134 struct sun4i_layer **layers;
140 int i; 135 int i;
141 136
142 layers = devm_kcalloc(drm->dev, ARRAY_SIZE(sun4i_backend_planes), 137 layers = devm_kcalloc(drm->dev, ARRAY_SIZE(sun4i_backend_planes) + 1,
143 sizeof(**layers), GFP_KERNEL); 138 sizeof(*layers), GFP_KERNEL);
144 if (!layers) 139 if (!layers)
145 return ERR_PTR(-ENOMEM); 140 return ERR_PTR(-ENOMEM);
146 141
@@ -167,9 +162,9 @@ struct sun4i_layer **sun4i_layers_init(struct drm_device *drm)
167 */ 162 */
168 for (i = 0; i < ARRAY_SIZE(sun4i_backend_planes); i++) { 163 for (i = 0; i < ARRAY_SIZE(sun4i_backend_planes); i++) {
169 const struct sun4i_plane_desc *plane = &sun4i_backend_planes[i]; 164 const struct sun4i_plane_desc *plane = &sun4i_backend_planes[i];
170 struct sun4i_layer *layer = layers[i]; 165 struct sun4i_layer *layer;
171 166
172 layer = sun4i_layer_init_one(drm, plane); 167 layer = sun4i_layer_init_one(drm, backend, plane);
173 if (IS_ERR(layer)) { 168 if (IS_ERR(layer)) {
174 dev_err(drm->dev, "Couldn't initialize %s plane\n", 169 dev_err(drm->dev, "Couldn't initialize %s plane\n",
175 i ? "overlay" : "primary"); 170 i ? "overlay" : "primary");
@@ -178,11 +173,12 @@ struct sun4i_layer **sun4i_layers_init(struct drm_device *drm)
178 173
179 DRM_DEBUG_DRIVER("Assigning %s plane to pipe %d\n", 174 DRM_DEBUG_DRIVER("Assigning %s plane to pipe %d\n",
180 i ? "overlay" : "primary", plane->pipe); 175 i ? "overlay" : "primary", plane->pipe);
181 regmap_update_bits(drv->backend->regs, SUN4I_BACKEND_ATTCTL_REG0(i), 176 regmap_update_bits(backend->regs, SUN4I_BACKEND_ATTCTL_REG0(i),
182 SUN4I_BACKEND_ATTCTL_REG0_LAY_PIPESEL_MASK, 177 SUN4I_BACKEND_ATTCTL_REG0_LAY_PIPESEL_MASK,
183 SUN4I_BACKEND_ATTCTL_REG0_LAY_PIPESEL(plane->pipe)); 178 SUN4I_BACKEND_ATTCTL_REG0_LAY_PIPESEL(plane->pipe));
184 179
185 layer->id = i; 180 layer->id = i;
181 layers[i] = layer;
186 }; 182 };
187 183
188 return layers; 184 return layers;
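
Note: two quiet fixes above: devm_kcalloc() now allocates one extra, zeroed
slot so the layers array is NULL-terminated, and the element size is
sizeof(*layers) (a pointer) rather than sizeof(**layers) (a whole struct).
Callers can then walk the array without a separate count, e.g. (hypothetical
use_layer() for illustration):

    struct sun4i_layer **layers = sun4i_layers_init(drm, backend);
    int i;

    for (i = 0; layers[i]; i++)       /* stops at the zeroed sentinel slot */
            use_layer(layers[i]);
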
diff --git a/drivers/gpu/drm/sun4i/sun4i_layer.h b/drivers/gpu/drm/sun4i/sun4i_layer.h
index a2f65d7a3f4e..4be1f0919df2 100644
--- a/drivers/gpu/drm/sun4i/sun4i_layer.h
+++ b/drivers/gpu/drm/sun4i/sun4i_layer.h
@@ -16,6 +16,7 @@
16struct sun4i_layer { 16struct sun4i_layer {
17 struct drm_plane plane; 17 struct drm_plane plane;
18 struct sun4i_drv *drv; 18 struct sun4i_drv *drv;
19 struct sun4i_backend *backend;
19 int id; 20 int id;
20}; 21};
21 22
@@ -25,6 +26,7 @@ plane_to_sun4i_layer(struct drm_plane *plane)
25 return container_of(plane, struct sun4i_layer, plane); 26 return container_of(plane, struct sun4i_layer, plane);
26} 27}
27 28
28struct sun4i_layer **sun4i_layers_init(struct drm_device *drm); 29struct sun4i_layer **sun4i_layers_init(struct drm_device *drm,
30 struct sun4i_backend *backend);
29 31
30#endif /* _SUN4I_LAYER_H_ */ 32#endif /* _SUN4I_LAYER_H_ */
diff --git a/drivers/gpu/drm/sun4i/sun4i_rgb.c b/drivers/gpu/drm/sun4i/sun4i_rgb.c
index 757208f51731..67f0b91a99de 100644
--- a/drivers/gpu/drm/sun4i/sun4i_rgb.c
+++ b/drivers/gpu/drm/sun4i/sun4i_rgb.c
@@ -15,9 +15,10 @@
15#include <drm/drmP.h> 15#include <drm/drmP.h>
16#include <drm/drm_atomic_helper.h> 16#include <drm/drm_atomic_helper.h>
17#include <drm/drm_crtc_helper.h> 17#include <drm/drm_crtc_helper.h>
18#include <drm/drm_of.h>
18#include <drm/drm_panel.h> 19#include <drm/drm_panel.h>
19 20
20#include "sun4i_drv.h" 21#include "sun4i_crtc.h"
21#include "sun4i_tcon.h" 22#include "sun4i_tcon.h"
22#include "sun4i_rgb.h" 23#include "sun4i_rgb.h"
23 24
@@ -25,7 +26,7 @@ struct sun4i_rgb {
25 struct drm_connector connector; 26 struct drm_connector connector;
26 struct drm_encoder encoder; 27 struct drm_encoder encoder;
27 28
28 struct sun4i_drv *drv; 29 struct sun4i_tcon *tcon;
29}; 30};
30 31
31static inline struct sun4i_rgb * 32static inline struct sun4i_rgb *
@@ -46,8 +47,7 @@ static int sun4i_rgb_get_modes(struct drm_connector *connector)
46{ 47{
47 struct sun4i_rgb *rgb = 48 struct sun4i_rgb *rgb =
48 drm_connector_to_sun4i_rgb(connector); 49 drm_connector_to_sun4i_rgb(connector);
49 struct sun4i_drv *drv = rgb->drv; 50 struct sun4i_tcon *tcon = rgb->tcon;
50 struct sun4i_tcon *tcon = drv->tcon;
51 51
52 return drm_panel_get_modes(tcon->panel); 52 return drm_panel_get_modes(tcon->panel);
53} 53}
@@ -56,8 +56,7 @@ static int sun4i_rgb_mode_valid(struct drm_connector *connector,
56 struct drm_display_mode *mode) 56 struct drm_display_mode *mode)
57{ 57{
58 struct sun4i_rgb *rgb = drm_connector_to_sun4i_rgb(connector); 58 struct sun4i_rgb *rgb = drm_connector_to_sun4i_rgb(connector);
59 struct sun4i_drv *drv = rgb->drv; 59 struct sun4i_tcon *tcon = rgb->tcon;
60 struct sun4i_tcon *tcon = drv->tcon;
61 u32 hsync = mode->hsync_end - mode->hsync_start; 60 u32 hsync = mode->hsync_end - mode->hsync_start;
62 u32 vsync = mode->vsync_end - mode->vsync_start; 61 u32 vsync = mode->vsync_end - mode->vsync_start;
63 unsigned long rate = mode->clock * 1000; 62 unsigned long rate = mode->clock * 1000;
@@ -114,8 +113,7 @@ static void
114sun4i_rgb_connector_destroy(struct drm_connector *connector) 113sun4i_rgb_connector_destroy(struct drm_connector *connector)
115{ 114{
116 struct sun4i_rgb *rgb = drm_connector_to_sun4i_rgb(connector); 115 struct sun4i_rgb *rgb = drm_connector_to_sun4i_rgb(connector);
117 struct sun4i_drv *drv = rgb->drv; 116 struct sun4i_tcon *tcon = rgb->tcon;
118 struct sun4i_tcon *tcon = drv->tcon;
119 117
120 drm_panel_detach(tcon->panel); 118 drm_panel_detach(tcon->panel);
121 drm_connector_cleanup(connector); 119 drm_connector_cleanup(connector);
@@ -140,8 +138,7 @@ static int sun4i_rgb_atomic_check(struct drm_encoder *encoder,
140static void sun4i_rgb_encoder_enable(struct drm_encoder *encoder) 138static void sun4i_rgb_encoder_enable(struct drm_encoder *encoder)
141{ 139{
142 struct sun4i_rgb *rgb = drm_encoder_to_sun4i_rgb(encoder); 140 struct sun4i_rgb *rgb = drm_encoder_to_sun4i_rgb(encoder);
143 struct sun4i_drv *drv = rgb->drv; 141 struct sun4i_tcon *tcon = rgb->tcon;
144 struct sun4i_tcon *tcon = drv->tcon;
145 142
146 DRM_DEBUG_DRIVER("Enabling RGB output\n"); 143 DRM_DEBUG_DRIVER("Enabling RGB output\n");
147 144
@@ -157,8 +154,7 @@ static void sun4i_rgb_encoder_enable(struct drm_encoder *encoder)
157static void sun4i_rgb_encoder_disable(struct drm_encoder *encoder) 154static void sun4i_rgb_encoder_disable(struct drm_encoder *encoder)
158{ 155{
159 struct sun4i_rgb *rgb = drm_encoder_to_sun4i_rgb(encoder); 156 struct sun4i_rgb *rgb = drm_encoder_to_sun4i_rgb(encoder);
160 struct sun4i_drv *drv = rgb->drv; 157 struct sun4i_tcon *tcon = rgb->tcon;
161 struct sun4i_tcon *tcon = drv->tcon;
162 158
163 DRM_DEBUG_DRIVER("Disabling RGB output\n"); 159 DRM_DEBUG_DRIVER("Disabling RGB output\n");
164 160
@@ -176,8 +172,7 @@ static void sun4i_rgb_encoder_mode_set(struct drm_encoder *encoder,
176 struct drm_display_mode *adjusted_mode) 172 struct drm_display_mode *adjusted_mode)
177{ 173{
178 struct sun4i_rgb *rgb = drm_encoder_to_sun4i_rgb(encoder); 174 struct sun4i_rgb *rgb = drm_encoder_to_sun4i_rgb(encoder);
179 struct sun4i_drv *drv = rgb->drv; 175 struct sun4i_tcon *tcon = rgb->tcon;
180 struct sun4i_tcon *tcon = drv->tcon;
181 176
182 sun4i_tcon0_mode_set(tcon, mode); 177 sun4i_tcon0_mode_set(tcon, mode);
183 178
@@ -203,10 +198,8 @@ static struct drm_encoder_funcs sun4i_rgb_enc_funcs = {
203 .destroy = sun4i_rgb_enc_destroy, 198 .destroy = sun4i_rgb_enc_destroy,
204}; 199};
205 200
206int sun4i_rgb_init(struct drm_device *drm) 201int sun4i_rgb_init(struct drm_device *drm, struct sun4i_tcon *tcon)
207{ 202{
208 struct sun4i_drv *drv = drm->dev_private;
209 struct sun4i_tcon *tcon = drv->tcon;
210 struct drm_encoder *encoder; 203 struct drm_encoder *encoder;
211 struct drm_bridge *bridge; 204 struct drm_bridge *bridge;
212 struct sun4i_rgb *rgb; 205 struct sun4i_rgb *rgb;
@@ -215,12 +208,12 @@ int sun4i_rgb_init(struct drm_device *drm)
215 rgb = devm_kzalloc(drm->dev, sizeof(*rgb), GFP_KERNEL); 208 rgb = devm_kzalloc(drm->dev, sizeof(*rgb), GFP_KERNEL);
216 if (!rgb) 209 if (!rgb)
217 return -ENOMEM; 210 return -ENOMEM;
218 rgb->drv = drv; 211 rgb->tcon = tcon;
219 encoder = &rgb->encoder; 212 encoder = &rgb->encoder;
220 213
221 tcon->panel = sun4i_tcon_find_panel(tcon->dev->of_node); 214 ret = drm_of_find_panel_or_bridge(tcon->dev->of_node, 1, 0,
222 bridge = sun4i_tcon_find_bridge(tcon->dev->of_node); 215 &tcon->panel, &bridge);
223 if (IS_ERR(tcon->panel) && IS_ERR(bridge)) { 216 if (ret) {
224 dev_info(drm->dev, "No panel or bridge found... RGB output disabled\n"); 217 dev_info(drm->dev, "No panel or bridge found... RGB output disabled\n");
225 return 0; 218 return 0;
226 } 219 }
@@ -238,9 +231,9 @@ int sun4i_rgb_init(struct drm_device *drm)
238 } 231 }
239 232
240 /* The RGB encoder can only work with the TCON channel 0 */ 233 /* The RGB encoder can only work with the TCON channel 0 */
241 rgb->encoder.possible_crtcs = BIT(0); 234 rgb->encoder.possible_crtcs = BIT(drm_crtc_index(&tcon->crtc->crtc));
242 235
243 if (!IS_ERR(tcon->panel)) { 236 if (tcon->panel) {
244 drm_connector_helper_add(&rgb->connector, 237 drm_connector_helper_add(&rgb->connector,
245 &sun4i_rgb_con_helper_funcs); 238 &sun4i_rgb_con_helper_funcs);
246 ret = drm_connector_init(drm, &rgb->connector, 239 ret = drm_connector_init(drm, &rgb->connector,
@@ -261,7 +254,7 @@ int sun4i_rgb_init(struct drm_device *drm)
261 } 254 }
262 } 255 }
263 256
264 if (!IS_ERR(bridge)) { 257 if (bridge) {
265 ret = drm_bridge_attach(encoder, bridge, NULL); 258 ret = drm_bridge_attach(encoder, bridge, NULL);
266 if (ret) { 259 if (ret) {
267 dev_err(drm->dev, "Couldn't attach our bridge\n"); 260 dev_err(drm->dev, "Couldn't attach our bridge\n");
diff --git a/drivers/gpu/drm/sun4i/sun4i_rgb.h b/drivers/gpu/drm/sun4i/sun4i_rgb.h
index 7c4da4c8acdd..40c18f4a6c7e 100644
--- a/drivers/gpu/drm/sun4i/sun4i_rgb.h
+++ b/drivers/gpu/drm/sun4i/sun4i_rgb.h
@@ -13,6 +13,6 @@
13#ifndef _SUN4I_RGB_H_ 13#ifndef _SUN4I_RGB_H_
14#define _SUN4I_RGB_H_ 14#define _SUN4I_RGB_H_
15 15
16int sun4i_rgb_init(struct drm_device *drm); 16int sun4i_rgb_init(struct drm_device *drm, struct sun4i_tcon *tcon);
17 17
18#endif /* _SUN4I_RGB_H_ */ 18#endif /* _SUN4I_RGB_H_ */
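
Note: drm_of_find_panel_or_bridge() replaces the two near-identical OF-graph
walkers deleted from sun4i_tcon.c below. It looks up the remote device on the
given port/endpoint, sets whichever of *panel or *bridge was found (the caller
checks the pointers for non-NULL), and returns -EPROBE_DEFER while the remote
driver has not bound yet. Usage as in the hunks above:

    struct drm_panel *panel;
    struct drm_bridge *bridge;
    int ret;

    /* port 1, endpoint 0: the TCON's RGB output */
    ret = drm_of_find_panel_or_bridge(node, 1, 0, &panel, &bridge);
    if (ret == -EPROBE_DEFER)
            return ret;               /* remote panel/bridge not probed yet */
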
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
index ea2906f87cb9..9a83a85529ac 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
@@ -15,13 +15,12 @@
15#include <drm/drm_crtc.h> 15#include <drm/drm_crtc.h>
16#include <drm/drm_crtc_helper.h> 16#include <drm/drm_crtc_helper.h>
17#include <drm/drm_modes.h> 17#include <drm/drm_modes.h>
18#include <drm/drm_panel.h> 18#include <drm/drm_of.h>
19 19
20#include <linux/component.h> 20#include <linux/component.h>
21#include <linux/ioport.h> 21#include <linux/ioport.h>
22#include <linux/of_address.h> 22#include <linux/of_address.h>
23#include <linux/of_device.h> 23#include <linux/of_device.h>
24#include <linux/of_graph.h>
25#include <linux/of_irq.h> 24#include <linux/of_irq.h>
26#include <linux/regmap.h> 25#include <linux/regmap.h>
27#include <linux/reset.h> 26#include <linux/reset.h>
@@ -143,7 +142,7 @@ void sun4i_tcon0_mode_set(struct sun4i_tcon *tcon,
143 142
144 /* 143 /*
145 * This is called a backporch in the register documentation, 144 * This is called a backporch in the register documentation,
146 * but it really is the front porch + hsync 145 * but it really is the back porch + hsync
147 */ 146 */
148 bp = mode->crtc_htotal - mode->crtc_hsync_start; 147 bp = mode->crtc_htotal - mode->crtc_hsync_start;
149 DRM_DEBUG_DRIVER("Setting horizontal total %d, backporch %d\n", 148 DRM_DEBUG_DRIVER("Setting horizontal total %d, backporch %d\n",
@@ -156,7 +155,7 @@ void sun4i_tcon0_mode_set(struct sun4i_tcon *tcon,
156 155
157 /* 156 /*
158 * This is called a backporch in the register documentation, 157 * This is called a backporch in the register documentation,
159 * but it really is the front porch + hsync 158 * but it really is the back porch + hsync
160 */ 159 */
161 bp = mode->crtc_vtotal - mode->crtc_vsync_start; 160 bp = mode->crtc_vtotal - mode->crtc_vsync_start;
162 DRM_DEBUG_DRIVER("Setting vertical total %d, backporch %d\n", 161 DRM_DEBUG_DRIVER("Setting vertical total %d, backporch %d\n",
@@ -290,8 +289,7 @@ static irqreturn_t sun4i_tcon_handler(int irq, void *private)
290{ 289{
291 struct sun4i_tcon *tcon = private; 290 struct sun4i_tcon *tcon = private;
292 struct drm_device *drm = tcon->drm; 291 struct drm_device *drm = tcon->drm;
293 struct sun4i_drv *drv = drm->dev_private; 292 struct sun4i_crtc *scrtc = tcon->crtc;
294 struct sun4i_crtc *scrtc = drv->crtc;
295 unsigned int status; 293 unsigned int status;
296 294
297 regmap_read(tcon->regs, SUN4I_TCON_GINT0_REG, &status); 295 regmap_read(tcon->regs, SUN4I_TCON_GINT0_REG, &status);
@@ -336,12 +334,11 @@ static int sun4i_tcon_init_clocks(struct device *dev,
336 } 334 }
337 } 335 }
338 336
339 return sun4i_dclk_create(dev, tcon); 337 return 0;
340} 338}
341 339
342static void sun4i_tcon_free_clocks(struct sun4i_tcon *tcon) 340static void sun4i_tcon_free_clocks(struct sun4i_tcon *tcon)
343{ 341{
344 sun4i_dclk_free(tcon);
345 clk_disable_unprepare(tcon->clk); 342 clk_disable_unprepare(tcon->clk);
346} 343}
347 344
@@ -405,74 +402,6 @@ static int sun4i_tcon_init_regmap(struct device *dev,
405 return 0; 402 return 0;
406} 403}
407 404
408struct drm_panel *sun4i_tcon_find_panel(struct device_node *node)
409{
410 struct device_node *port, *remote, *child;
411 struct device_node *end_node = NULL;
412
413 /* Inputs are listed first, then outputs */
414 port = of_graph_get_port_by_id(node, 1);
415
416 /*
417 * Our first output is the RGB interface where the panel will
418 * be connected.
419 */
420 for_each_child_of_node(port, child) {
421 u32 reg;
422
423 of_property_read_u32(child, "reg", &reg);
424 if (reg == 0)
425 end_node = child;
426 }
427
428 if (!end_node) {
429 DRM_DEBUG_DRIVER("Missing panel endpoint\n");
430 return ERR_PTR(-ENODEV);
431 }
432
433 remote = of_graph_get_remote_port_parent(end_node);
434 if (!remote) {
435 DRM_DEBUG_DRIVER("Unable to parse remote node\n");
436 return ERR_PTR(-EINVAL);
437 }
438
439 return of_drm_find_panel(remote) ?: ERR_PTR(-EPROBE_DEFER);
440}
441
442struct drm_bridge *sun4i_tcon_find_bridge(struct device_node *node)
443{
444 struct device_node *port, *remote, *child;
445 struct device_node *end_node = NULL;
446
447 /* Inputs are listed first, then outputs */
448 port = of_graph_get_port_by_id(node, 1);
449
450 /*
451 * Our first output is the RGB interface where the panel will
452 * be connected.
453 */
454 for_each_child_of_node(port, child) {
455 u32 reg;
456
457 of_property_read_u32(child, "reg", &reg);
458 if (reg == 0)
459 end_node = child;
460 }
461
462 if (!end_node) {
463 DRM_DEBUG_DRIVER("Missing bridge endpoint\n");
464 return ERR_PTR(-ENODEV);
465 }
466
467 remote = of_graph_get_remote_port_parent(end_node);
468 if (!remote) {
469 DRM_DEBUG_DRIVER("Enable to parse remote node\n");
470 return ERR_PTR(-EINVAL);
471 }
472
473 return of_drm_find_bridge(remote) ?: ERR_PTR(-EPROBE_DEFER);
474}
475
476static int sun4i_tcon_bind(struct device *dev, struct device *master, 405static int sun4i_tcon_bind(struct device *dev, struct device *master,
477 void *data) 406 void *data)
478{ 407{
@@ -506,30 +435,45 @@ static int sun4i_tcon_bind(struct device *dev, struct device *master,
506 return ret; 435 return ret;
507 } 436 }
508 437
438 ret = sun4i_tcon_init_clocks(dev, tcon);
439 if (ret) {
440 dev_err(dev, "Couldn't init our TCON clocks\n");
441 goto err_assert_reset;
442 }
443
509 ret = sun4i_tcon_init_regmap(dev, tcon); 444 ret = sun4i_tcon_init_regmap(dev, tcon);
510 if (ret) { 445 if (ret) {
511 dev_err(dev, "Couldn't init our TCON regmap\n"); 446 dev_err(dev, "Couldn't init our TCON regmap\n");
512 goto err_assert_reset; 447 goto err_free_clocks;
513 } 448 }
514 449
515 ret = sun4i_tcon_init_clocks(dev, tcon); 450 ret = sun4i_dclk_create(dev, tcon);
516 if (ret) { 451 if (ret) {
517 dev_err(dev, "Couldn't init our TCON clocks\n"); 452 dev_err(dev, "Couldn't create our TCON dot clock\n");
518 goto err_assert_reset; 453 goto err_free_clocks;
519 } 454 }
520 455
521 ret = sun4i_tcon_init_irq(dev, tcon); 456 ret = sun4i_tcon_init_irq(dev, tcon);
522 if (ret) { 457 if (ret) {
523 dev_err(dev, "Couldn't init our TCON interrupts\n"); 458 dev_err(dev, "Couldn't init our TCON interrupts\n");
459 goto err_free_dotclock;
460 }
461
462 tcon->crtc = sun4i_crtc_init(drm, drv->backend, tcon);
463 if (IS_ERR(tcon->crtc)) {
464 dev_err(dev, "Couldn't create our CRTC\n");
465 ret = PTR_ERR(tcon->crtc);
524 goto err_free_clocks; 466 goto err_free_dotclock;
525 } 467 }
526 468
527 ret = sun4i_rgb_init(drm); 469 ret = sun4i_rgb_init(drm, tcon);
528 if (ret < 0) 470 if (ret < 0)
529 goto err_free_clocks; 471 goto err_free_dotclock;
530 472
531 return 0; 473 return 0;
532 474
475err_free_dotclock:
476 sun4i_dclk_free(tcon);
533err_free_clocks: 477err_free_clocks:
534 sun4i_tcon_free_clocks(tcon); 478 sun4i_tcon_free_clocks(tcon);
535err_assert_reset: 479err_assert_reset:
@@ -542,6 +486,7 @@ static void sun4i_tcon_unbind(struct device *dev, struct device *master,
542{ 486{
543 struct sun4i_tcon *tcon = dev_get_drvdata(dev); 487 struct sun4i_tcon *tcon = dev_get_drvdata(dev);
544 488
489 sun4i_dclk_free(tcon);
545 sun4i_tcon_free_clocks(tcon); 490 sun4i_tcon_free_clocks(tcon);
546} 491}
547 492
@@ -555,22 +500,11 @@ static int sun4i_tcon_probe(struct platform_device *pdev)
555 struct device_node *node = pdev->dev.of_node; 500 struct device_node *node = pdev->dev.of_node;
556 struct drm_bridge *bridge; 501 struct drm_bridge *bridge;
557 struct drm_panel *panel; 502 struct drm_panel *panel;
503 int ret;
558 504
559 /* 505 ret = drm_of_find_panel_or_bridge(node, 1, 0, &panel, &bridge);
560 * Neither the bridge or the panel is ready. 506 if (ret == -EPROBE_DEFER)
561 * Defer the probe. 507 return ret;
562 */
563 panel = sun4i_tcon_find_panel(node);
564 bridge = sun4i_tcon_find_bridge(node);
565
566 /*
567 * If we don't have a panel endpoint, just go on
568 */
569 if ((PTR_ERR(panel) == -EPROBE_DEFER) &&
570 (PTR_ERR(bridge) == -EPROBE_DEFER)) {
571 DRM_DEBUG_DRIVER("Still waiting for our panel/bridge. Deferring...\n");
572 return -EPROBE_DEFER;
573 }
574 508
575 return component_add(&pdev->dev, &sun4i_tcon_ops); 509 return component_add(&pdev->dev, &sun4i_tcon_ops);
576} 510}
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.h b/drivers/gpu/drm/sun4i/sun4i_tcon.h
index 166064bafe2e..f636343a935d 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.h
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.h
@@ -169,6 +169,9 @@ struct sun4i_tcon {
169 169
170 /* Platform adjustments */ 170 /* Platform adjustments */
171 const struct sun4i_tcon_quirks *quirks; 171 const struct sun4i_tcon_quirks *quirks;
172
173 /* Associated crtc */
174 struct sun4i_crtc *crtc;
172}; 175};
173 176
174struct drm_bridge *sun4i_tcon_find_bridge(struct device_node *node); 177struct drm_bridge *sun4i_tcon_find_bridge(struct device_node *node);
diff --git a/drivers/gpu/drm/sun4i/sun4i_tv.c b/drivers/gpu/drm/sun4i/sun4i_tv.c
index c6f47222e8fc..49c49431a053 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tv.c
@@ -19,9 +19,11 @@
19#include <drm/drmP.h> 19#include <drm/drmP.h>
20#include <drm/drm_atomic_helper.h> 20#include <drm/drm_atomic_helper.h>
21#include <drm/drm_crtc_helper.h> 21#include <drm/drm_crtc_helper.h>
22#include <drm/drm_of.h>
22#include <drm/drm_panel.h> 23#include <drm/drm_panel.h>
23 24
24#include "sun4i_backend.h" 25#include "sun4i_backend.h"
26#include "sun4i_crtc.h"
25#include "sun4i_drv.h" 27#include "sun4i_drv.h"
26#include "sun4i_tcon.h" 28#include "sun4i_tcon.h"
27 29
@@ -349,8 +351,9 @@ static int sun4i_tv_atomic_check(struct drm_encoder *encoder,
349static void sun4i_tv_disable(struct drm_encoder *encoder) 351static void sun4i_tv_disable(struct drm_encoder *encoder)
350{ 352{
351 struct sun4i_tv *tv = drm_encoder_to_sun4i_tv(encoder); 353 struct sun4i_tv *tv = drm_encoder_to_sun4i_tv(encoder);
352 struct sun4i_drv *drv = tv->drv; 354 struct sun4i_crtc *crtc = drm_crtc_to_sun4i_crtc(encoder->crtc);
353 struct sun4i_tcon *tcon = drv->tcon; 355 struct sun4i_tcon *tcon = crtc->tcon;
356 struct sun4i_backend *backend = crtc->backend;
354 357
355 DRM_DEBUG_DRIVER("Disabling the TV Output\n"); 358 DRM_DEBUG_DRIVER("Disabling the TV Output\n");
356 359
@@ -359,18 +362,19 @@ static void sun4i_tv_disable(struct drm_encoder *encoder)
359 regmap_update_bits(tv->regs, SUN4I_TVE_EN_REG, 362 regmap_update_bits(tv->regs, SUN4I_TVE_EN_REG,
360 SUN4I_TVE_EN_ENABLE, 363 SUN4I_TVE_EN_ENABLE,
361 0); 364 0);
362 sun4i_backend_disable_color_correction(drv->backend); 365 sun4i_backend_disable_color_correction(backend);
363} 366}
364 367
365static void sun4i_tv_enable(struct drm_encoder *encoder) 368static void sun4i_tv_enable(struct drm_encoder *encoder)
366{ 369{
367 struct sun4i_tv *tv = drm_encoder_to_sun4i_tv(encoder); 370 struct sun4i_tv *tv = drm_encoder_to_sun4i_tv(encoder);
368 struct sun4i_drv *drv = tv->drv; 371 struct sun4i_crtc *crtc = drm_crtc_to_sun4i_crtc(encoder->crtc);
369 struct sun4i_tcon *tcon = drv->tcon; 372 struct sun4i_tcon *tcon = crtc->tcon;
373 struct sun4i_backend *backend = crtc->backend;
370 374
371 DRM_DEBUG_DRIVER("Enabling the TV Output\n"); 375 DRM_DEBUG_DRIVER("Enabling the TV Output\n");
372 376
373 sun4i_backend_apply_color_correction(drv->backend); 377 sun4i_backend_apply_color_correction(backend);
374 378
375 regmap_update_bits(tv->regs, SUN4I_TVE_EN_REG, 379 regmap_update_bits(tv->regs, SUN4I_TVE_EN_REG,
376 SUN4I_TVE_EN_ENABLE, 380 SUN4I_TVE_EN_ENABLE,
@@ -384,8 +388,8 @@ static void sun4i_tv_mode_set(struct drm_encoder *encoder,
384 struct drm_display_mode *adjusted_mode) 388 struct drm_display_mode *adjusted_mode)
385{ 389{
386 struct sun4i_tv *tv = drm_encoder_to_sun4i_tv(encoder); 390 struct sun4i_tv *tv = drm_encoder_to_sun4i_tv(encoder);
387 struct sun4i_drv *drv = tv->drv; 391 struct sun4i_crtc *crtc = drm_crtc_to_sun4i_crtc(encoder->crtc);
388 struct sun4i_tcon *tcon = drv->tcon; 392 struct sun4i_tcon *tcon = crtc->tcon;
389 const struct tv_mode *tv_mode = sun4i_tv_find_tv_by_mode(mode); 393 const struct tv_mode *tv_mode = sun4i_tv_find_tv_by_mode(mode);
390 394
391 sun4i_tcon1_mode_set(tcon, mode); 395 sun4i_tcon1_mode_set(tcon, mode);
@@ -623,7 +627,12 @@ static int sun4i_tv_bind(struct device *dev, struct device *master,
623 goto err_disable_clk; 627 goto err_disable_clk;
624 } 628 }
625 629
626 tv->encoder.possible_crtcs = BIT(0); 630 tv->encoder.possible_crtcs = drm_of_find_possible_crtcs(drm,
631 dev->of_node);
632 if (!tv->encoder.possible_crtcs) {
633 ret = -EPROBE_DEFER;
634 goto err_disable_clk;
635 }
627 636
628 drm_connector_helper_add(&tv->connector, 637 drm_connector_helper_add(&tv->connector,
629 &sun4i_tv_comp_connector_helper_funcs); 638 &sun4i_tv_comp_connector_helper_funcs);
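
Note: rather than hard-coding BIT(0), the TV encoder now derives its CRTC mask
from the OF graph. drm_of_find_possible_crtcs() returns 0 when no upstream
CRTC has registered yet, which the driver maps to -EPROBE_DEFER so the bind is
retried later:

    tv->encoder.possible_crtcs = drm_of_find_possible_crtcs(drm, dev->of_node);
    if (!tv->encoder.possible_crtcs) {
            ret = -EPROBE_DEFER;      /* upstream CRTC not bound yet */
            goto err_disable_clk;
    }
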
diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c
index 17e62ecb5d4d..8672f5d2f237 100644
--- a/drivers/gpu/drm/tegra/gem.c
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -619,10 +619,10 @@ static const struct dma_buf_ops tegra_gem_prime_dmabuf_ops = {
619 .map_dma_buf = tegra_gem_prime_map_dma_buf, 619 .map_dma_buf = tegra_gem_prime_map_dma_buf,
620 .unmap_dma_buf = tegra_gem_prime_unmap_dma_buf, 620 .unmap_dma_buf = tegra_gem_prime_unmap_dma_buf,
621 .release = tegra_gem_prime_release, 621 .release = tegra_gem_prime_release,
622 .kmap_atomic = tegra_gem_prime_kmap_atomic, 622 .map_atomic = tegra_gem_prime_kmap_atomic,
623 .kunmap_atomic = tegra_gem_prime_kunmap_atomic, 623 .unmap_atomic = tegra_gem_prime_kunmap_atomic,
624 .kmap = tegra_gem_prime_kmap, 624 .map = tegra_gem_prime_kmap,
625 .kunmap = tegra_gem_prime_kunmap, 625 .unmap = tegra_gem_prime_kunmap,
626 .mmap = tegra_gem_prime_mmap, 626 .mmap = tegra_gem_prime_mmap,
627 .vmap = tegra_gem_prime_vmap, 627 .vmap = tegra_gem_prime_vmap,
628 .vunmap = tegra_gem_prime_vunmap, 628 .vunmap = tegra_gem_prime_vunmap,
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
index afd2a7b2aff7..d524ed0d5146 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
@@ -23,6 +23,7 @@
23#include <linux/workqueue.h> 23#include <linux/workqueue.h>
24#include <linux/completion.h> 24#include <linux/completion.h>
25#include <linux/dma-mapping.h> 25#include <linux/dma-mapping.h>
26#include <linux/of_graph.h>
26 27
27#include "tilcdc_drv.h" 28#include "tilcdc_drv.h"
28#include "tilcdc_regs.h" 29#include "tilcdc_regs.h"
@@ -1035,16 +1036,7 @@ int tilcdc_crtc_create(struct drm_device *dev)
1035 drm_crtc_helper_add(crtc, &tilcdc_crtc_helper_funcs); 1036 drm_crtc_helper_add(crtc, &tilcdc_crtc_helper_funcs);
1036 1037
1037 if (priv->is_componentized) { 1038 if (priv->is_componentized) {
1038 struct device_node *ports = 1039 crtc->port = of_graph_get_port_by_id(dev->dev->of_node, 0);
1039 of_get_child_by_name(dev->dev->of_node, "ports");
1040
1041 if (ports) {
1042 crtc->port = of_get_child_by_name(ports, "port");
1043 of_node_put(ports);
1044 } else {
1045 crtc->port =
1046 of_get_child_by_name(dev->dev->of_node, "port");
1047 }
1048 if (!crtc->port) { /* This should never happen */ 1040 if (!crtc->port) { /* This should never happen */
1049 dev_err(dev->dev, "Port node not found in %s\n", 1041 dev_err(dev->dev, "Port node not found in %s\n",
1050 dev->dev->of_node->full_name); 1042 dev->dev->of_node->full_name);
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_external.c b/drivers/gpu/drm/tilcdc/tilcdc_external.c
index b0dd5e8634ae..711c7b3289d3 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_external.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_external.c
@@ -185,39 +185,6 @@ int tilcdc_attach_bridge(struct drm_device *ddev, struct drm_bridge *bridge)
185 return ret; 185 return ret;
186} 186}
187 187
188static int tilcdc_node_has_port(struct device_node *dev_node)
189{
190 struct device_node *node;
191
192 node = of_get_child_by_name(dev_node, "ports");
193 if (!node)
194 node = of_get_child_by_name(dev_node, "port");
195 if (!node)
196 return 0;
197 of_node_put(node);
198
199 return 1;
200}
201
202static
203struct device_node *tilcdc_get_remote_node(struct device_node *node)
204{
205 struct device_node *ep;
206 struct device_node *parent;
207
208 if (!tilcdc_node_has_port(node))
209 return NULL;
210
211 ep = of_graph_get_next_endpoint(node, NULL);
212 if (!ep)
213 return NULL;
214
215 parent = of_graph_get_remote_port_parent(ep);
216 of_node_put(ep);
217
218 return parent;
219}
220
221int tilcdc_attach_external_device(struct drm_device *ddev) 188int tilcdc_attach_external_device(struct drm_device *ddev)
222{ 189{
223 struct tilcdc_drm_private *priv = ddev->dev_private; 190 struct tilcdc_drm_private *priv = ddev->dev_private;
@@ -225,7 +192,7 @@ int tilcdc_attach_external_device(struct drm_device *ddev)
225 struct drm_bridge *bridge; 192 struct drm_bridge *bridge;
226 int ret; 193 int ret;
227 194
228 remote_node = tilcdc_get_remote_node(ddev->dev->of_node); 195 remote_node = of_graph_get_remote_node(ddev->dev->of_node, 0, 0);
229 if (!remote_node) 196 if (!remote_node)
230 return 0; 197 return 0;
231 198
@@ -264,35 +231,16 @@ int tilcdc_get_external_components(struct device *dev,
264 struct component_match **match) 231 struct component_match **match)
265{ 232{
266 struct device_node *node; 233 struct device_node *node;
267 struct device_node *ep = NULL;
268 int count = 0;
269 int ret = 0;
270
271 if (!tilcdc_node_has_port(dev->of_node))
272 return 0;
273 234
274 while ((ep = of_graph_get_next_endpoint(dev->of_node, ep))) { 235 node = of_graph_get_remote_node(dev->of_node, 0, 0);
275 node = of_graph_get_remote_port_parent(ep);
276 if (!node || !of_device_is_available(node)) {
277 of_node_put(node);
278 continue;
279 }
280
281 dev_dbg(dev, "Subdevice node '%s' found\n", node->name);
282
283 if (of_device_is_compatible(node, "nxp,tda998x")) {
284 if (match)
285 drm_of_component_match_add(dev, match,
286 dev_match_of, node);
287 ret = 1;
288 }
289 236
237 if (!of_device_is_compatible(node, "nxp,tda998x")) {
290 of_node_put(node); 238 of_node_put(node);
291 if (count++ > 1) { 239 return 0;
292 dev_err(dev, "Only one port is supported\n");
293 return -EINVAL;
294 }
295 } 240 }
296 241
297 return ret; 242 if (match)
243 drm_of_component_match_add(dev, match, dev_match_of, node);
244 of_node_put(node);
245 return 1;
298} 246}
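
Note: of_graph_get_remote_node(node, port, endpoint) collapses the deleted
has-port check, endpoint iteration and of_graph_get_remote_port_parent() into
a single call; the returned node is reference-counted and the caller must
still of_node_put() it, as the new code above does:

    struct device_node *remote;

    remote = of_graph_get_remote_node(dev->of_node, 0, 0);
    if (!remote)
            return 0;                 /* nothing wired to port 0 */
    /* ... inspect the remote node ... */
    of_node_put(remote);              /* drop the reference we were handed */
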
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 412240a3ba90..e44626a2e698 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -1020,37 +1020,44 @@ out_unlock:
1020 return ret; 1020 return ret;
1021} 1021}
1022 1022
1023bool ttm_bo_mem_compat(struct ttm_placement *placement, 1023static bool ttm_bo_places_compat(const struct ttm_place *places,
1024 struct ttm_mem_reg *mem, 1024 unsigned num_placement,
1025 uint32_t *new_flags) 1025 struct ttm_mem_reg *mem,
1026 uint32_t *new_flags)
1026{ 1027{
1027 int i; 1028 unsigned i;
1028
1029 for (i = 0; i < placement->num_placement; i++) {
1030 const struct ttm_place *heap = &placement->placement[i];
1031 if (mem->mm_node &&
1032 (mem->start < heap->fpfn ||
1033 (heap->lpfn != 0 && (mem->start + mem->num_pages) > heap->lpfn)))
1034 continue;
1035 1029
1036 *new_flags = heap->flags; 1030 for (i = 0; i < num_placement; i++) {
1037 if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) && 1031 const struct ttm_place *heap = &places[i];
1038 (*new_flags & mem->placement & TTM_PL_MASK_MEM))
1039 return true;
1040 }
1041 1032
1042 for (i = 0; i < placement->num_busy_placement; i++) { 1033 if (mem->mm_node && (mem->start < heap->fpfn ||
1043 const struct ttm_place *heap = &placement->busy_placement[i];
1044 if (mem->mm_node &&
1045 (mem->start < heap->fpfn ||
1046 (heap->lpfn != 0 && (mem->start + mem->num_pages) > heap->lpfn))) 1034 (heap->lpfn != 0 && (mem->start + mem->num_pages) > heap->lpfn)))
1047 continue; 1035 continue;
1048 1036
1049 *new_flags = heap->flags; 1037 *new_flags = heap->flags;
1050 if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) && 1038 if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
1051 (*new_flags & mem->placement & TTM_PL_MASK_MEM)) 1039 (*new_flags & mem->placement & TTM_PL_MASK_MEM) &&
1040 (!(*new_flags & TTM_PL_FLAG_CONTIGUOUS) ||
1041 (mem->placement & TTM_PL_FLAG_CONTIGUOUS)))
1052 return true; 1042 return true;
1053 } 1043 }
1044 return false;
1045}
1046
1047bool ttm_bo_mem_compat(struct ttm_placement *placement,
1048 struct ttm_mem_reg *mem,
1049 uint32_t *new_flags)
1050{
1051 if (ttm_bo_places_compat(placement->placement, placement->num_placement,
1052 mem, new_flags))
1053 return true;
1054
1055 if ((placement->busy_placement != placement->placement ||
1056 placement->num_busy_placement > placement->num_placement) &&
1057 ttm_bo_places_compat(placement->busy_placement,
1058 placement->num_busy_placement,
1059 mem, new_flags))
1060 return true;
1054 1061
1055 return false; 1062 return false;
1056} 1063}
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 35ffb3754feb..9f53df95f35c 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -231,7 +231,7 @@ static int ttm_bo_vm_fault(struct vm_fault *vmf)
231 */ 231 */
232 for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) { 232 for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
233 if (bo->mem.bus.is_iomem) 233 if (bo->mem.bus.is_iomem)
234 pfn = ((bo->mem.bus.base + bo->mem.bus.offset) >> PAGE_SHIFT) + page_offset; 234 pfn = bdev->driver->io_mem_pfn(bo, page_offset);
235 else { 235 else {
236 page = ttm->pages[page_offset]; 236 page = ttm->pages[page_offset];
237 if (unlikely(!page && i == 0)) { 237 if (unlikely(!page && i == 0)) {
@@ -324,6 +324,14 @@ static struct ttm_buffer_object *ttm_bo_vm_lookup(struct ttm_bo_device *bdev,
324 return bo; 324 return bo;
325} 325}
326 326
327unsigned long ttm_bo_default_io_mem_pfn(struct ttm_buffer_object *bo,
328 unsigned long page_offset)
329{
330 return ((bo->mem.bus.base + bo->mem.bus.offset) >> PAGE_SHIFT)
331 + page_offset;
332}
333EXPORT_SYMBOL(ttm_bo_default_io_mem_pfn);
334
327int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma, 335int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
328 struct ttm_bo_device *bdev) 336 struct ttm_bo_device *bdev)
329{ 337{
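
Note: the fault handler above now asks the driver for the PFN of io-memory
pages through a new io_mem_pfn hook on the driver structure, and
ttm_bo_default_io_mem_pfn() is exported for drivers that keep the old
base+offset arithmetic. A sketch of the expected wiring (hypothetical
example_bo_driver; the hook name is taken from the call site above):

    static struct ttm_bo_driver example_bo_driver = {
            /* ... other hooks ... */
            .io_mem_pfn = ttm_bo_default_io_mem_pfn,  /* old base+offset behaviour */
    };
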
diff --git a/drivers/gpu/drm/ttm/ttm_object.c b/drivers/gpu/drm/ttm/ttm_object.c
index fdb451e3ec01..26a7ad0f4789 100644
--- a/drivers/gpu/drm/ttm/ttm_object.c
+++ b/drivers/gpu/drm/ttm/ttm_object.c
@@ -179,7 +179,7 @@ int ttm_base_object_init(struct ttm_object_file *tfile,
179 if (unlikely(ret != 0)) 179 if (unlikely(ret != 0))
180 goto out_err0; 180 goto out_err0;
181 181
182 ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL); 182 ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, false);
183 if (unlikely(ret != 0)) 183 if (unlikely(ret != 0))
184 goto out_err1; 184 goto out_err1;
185 185
@@ -318,7 +318,8 @@ EXPORT_SYMBOL(ttm_ref_object_exists);
318 318
319int ttm_ref_object_add(struct ttm_object_file *tfile, 319int ttm_ref_object_add(struct ttm_object_file *tfile,
320 struct ttm_base_object *base, 320 struct ttm_base_object *base,
321 enum ttm_ref_type ref_type, bool *existed) 321 enum ttm_ref_type ref_type, bool *existed,
322 bool require_existed)
322{ 323{
323 struct drm_open_hash *ht = &tfile->ref_hash[ref_type]; 324 struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
324 struct ttm_ref_object *ref; 325 struct ttm_ref_object *ref;
@@ -345,6 +346,9 @@ int ttm_ref_object_add(struct ttm_object_file *tfile,
345 } 346 }
346 347
347 rcu_read_unlock(); 348 rcu_read_unlock();
349 if (require_existed)
350 return -EPERM;
351
348 ret = ttm_mem_global_alloc(mem_glob, sizeof(*ref), 352 ret = ttm_mem_global_alloc(mem_glob, sizeof(*ref),
349 false, false); 353 false, false);
350 if (unlikely(ret != 0)) 354 if (unlikely(ret != 0))
@@ -449,10 +453,10 @@ void ttm_object_file_release(struct ttm_object_file **p_tfile)
449 ttm_ref_object_release(&ref->kref); 453 ttm_ref_object_release(&ref->kref);
450 } 454 }
451 455
456 spin_unlock(&tfile->lock);
452 for (i = 0; i < TTM_REF_NUM; ++i) 457 for (i = 0; i < TTM_REF_NUM; ++i)
453 drm_ht_remove(&tfile->ref_hash[i]); 458 drm_ht_remove(&tfile->ref_hash[i]);
454 459
455 spin_unlock(&tfile->lock);
456 ttm_object_file_unref(&tfile); 460 ttm_object_file_unref(&tfile);
457} 461}
458EXPORT_SYMBOL(ttm_object_file_release); 462EXPORT_SYMBOL(ttm_object_file_release);
@@ -529,9 +533,7 @@ void ttm_object_device_release(struct ttm_object_device **p_tdev)
529 533
530 *p_tdev = NULL; 534 *p_tdev = NULL;
531 535
532 spin_lock(&tdev->object_lock);
533 drm_ht_remove(&tdev->object_hash); 536 drm_ht_remove(&tdev->object_hash);
534 spin_unlock(&tdev->object_lock);
535 537
536 kfree(tdev); 538 kfree(tdev);
537} 539}
@@ -635,7 +637,7 @@ int ttm_prime_fd_to_handle(struct ttm_object_file *tfile,
635 prime = (struct ttm_prime_object *) dma_buf->priv; 637 prime = (struct ttm_prime_object *) dma_buf->priv;
636 base = &prime->base; 638 base = &prime->base;
637 *handle = base->hash.key; 639 *handle = base->hash.key;
638 ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL); 640 ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, false);
639 641
640 dma_buf_put(dma_buf); 642 dma_buf_put(dma_buf);
641 643
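
The new require_existed flag makes ttm_ref_object_add() refuse to create a first reference: when the RCU lookup finds no existing reference of the requested type, the function now returns -EPERM instead of silently adding one on the caller's behalf. A hedged sketch of a lookup path that must not mint references for untrusted clients:

	/* Succeed only if the client already holds a TTM_REF_USAGE ref. */
	ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, true);
	if (ret == -EPERM)
		return ret;	/* client never opened this object */

The vmwgfx surface-reference hunk further down relies on exactly this to replace its blanket -EACCES rejection. The two drm_ht_remove() hunks are a separate cleanup in the same file: the hash tables are now torn down outside the spinlocks, apparently because drm_ht_remove() can end up freeing via vfree(), which must not run in atomic context.
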
diff --git a/drivers/gpu/drm/udl/udl_dmabuf.c b/drivers/gpu/drm/udl/udl_dmabuf.c
index ac90ffdb5912..ed0e636243b2 100644
--- a/drivers/gpu/drm/udl/udl_dmabuf.c
+++ b/drivers/gpu/drm/udl/udl_dmabuf.c
@@ -191,10 +191,10 @@ static struct dma_buf_ops udl_dmabuf_ops = {
191 .detach = udl_detach_dma_buf, 191 .detach = udl_detach_dma_buf,
192 .map_dma_buf = udl_map_dma_buf, 192 .map_dma_buf = udl_map_dma_buf,
193 .unmap_dma_buf = udl_unmap_dma_buf, 193 .unmap_dma_buf = udl_unmap_dma_buf,
194 .kmap = udl_dmabuf_kmap, 194 .map = udl_dmabuf_kmap,
195 .kmap_atomic = udl_dmabuf_kmap_atomic, 195 .map_atomic = udl_dmabuf_kmap_atomic,
196 .kunmap = udl_dmabuf_kunmap, 196 .unmap = udl_dmabuf_kunmap,
197 .kunmap_atomic = udl_dmabuf_kunmap_atomic, 197 .unmap_atomic = udl_dmabuf_kunmap_atomic,
198 .mmap = udl_dmabuf_mmap, 198 .mmap = udl_dmabuf_mmap,
199 .release = drm_gem_dmabuf_release, 199 .release = drm_gem_dmabuf_release,
200}; 200};
diff --git a/drivers/gpu/drm/udl/udl_transfer.c b/drivers/gpu/drm/udl/udl_transfer.c
index 917dcb978c2c..0c87b1ac6b68 100644
--- a/drivers/gpu/drm/udl/udl_transfer.c
+++ b/drivers/gpu/drm/udl/udl_transfer.c
@@ -14,6 +14,7 @@
14#include <linux/slab.h> 14#include <linux/slab.h>
15#include <linux/fb.h> 15#include <linux/fb.h>
16#include <linux/prefetch.h> 16#include <linux/prefetch.h>
17#include <asm/unaligned.h>
17 18
18#include <drm/drmP.h> 19#include <drm/drmP.h>
19#include "udl_drv.h" 20#include "udl_drv.h"
@@ -163,7 +164,7 @@ static void udl_compress_hline16(
163 const u8 *const start = pixel; 164 const u8 *const start = pixel;
164 const uint16_t repeating_pixel_val16 = pixel_val16; 165 const uint16_t repeating_pixel_val16 = pixel_val16;
165 166
166 *(uint16_t *)cmd = cpu_to_be16(pixel_val16); 167 put_unaligned_be16(pixel_val16, cmd);
167 168
168 cmd += 2; 169 cmd += 2;
169 pixel += bpp; 170 pixel += bpp;
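
put_unaligned_be16() fixes a store that was only accidentally working: cmd is a byte pointer with no alignment guarantee, so *(uint16_t *)cmd can fault on strict-alignment architectures, and sparse objects to the __be16 value going through a plain uint16_t. The helper byte-swaps and stores byte-wise:

	u8 buf[2];
	put_unaligned_be16(0x1234, buf);	/* buf[0] == 0x12, buf[1] == 0x34 */

The new <asm/unaligned.h> include above provides the helper; cmd still advances by 2 afterwards.
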
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index 865e9f494bcc..d86c8cce3182 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -314,7 +314,8 @@ vc4_crtc_lut_load(struct drm_crtc *crtc)
314 314
315static int 315static int
316vc4_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, 316vc4_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
317 uint32_t size) 317 uint32_t size,
318 struct drm_modeset_acquire_ctx *ctx)
318{ 319{
319 struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc); 320 struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
320 u32 i; 321 u32 i;
@@ -846,6 +847,17 @@ static void vc4_crtc_destroy_state(struct drm_crtc *crtc,
846 drm_atomic_helper_crtc_destroy_state(crtc, state); 847 drm_atomic_helper_crtc_destroy_state(crtc, state);
847} 848}
848 849
850static void
851vc4_crtc_reset(struct drm_crtc *crtc)
852{
853 if (crtc->state)
854 __drm_atomic_helper_crtc_destroy_state(crtc->state);
855
856 crtc->state = kzalloc(sizeof(struct vc4_crtc_state), GFP_KERNEL);
857 if (crtc->state)
858 crtc->state->crtc = crtc;
859}
860
849static const struct drm_crtc_funcs vc4_crtc_funcs = { 861static const struct drm_crtc_funcs vc4_crtc_funcs = {
850 .set_config = drm_atomic_helper_set_config, 862 .set_config = drm_atomic_helper_set_config,
851 .destroy = vc4_crtc_destroy, 863 .destroy = vc4_crtc_destroy,
@@ -853,7 +865,7 @@ static const struct drm_crtc_funcs vc4_crtc_funcs = {
853 .set_property = NULL, 865 .set_property = NULL,
854 .cursor_set = NULL, /* handled by drm_mode_cursor_universal */ 866 .cursor_set = NULL, /* handled by drm_mode_cursor_universal */
855 .cursor_move = NULL, /* handled by drm_mode_cursor_universal */ 867 .cursor_move = NULL, /* handled by drm_mode_cursor_universal */
856 .reset = drm_atomic_helper_crtc_reset, 868 .reset = vc4_crtc_reset,
857 .atomic_duplicate_state = vc4_crtc_duplicate_state, 869 .atomic_duplicate_state = vc4_crtc_duplicate_state,
858 .atomic_destroy_state = vc4_crtc_destroy_state, 870 .atomic_destroy_state = vc4_crtc_destroy_state,
859 .gamma_set = vc4_crtc_gamma_set, 871 .gamma_set = vc4_crtc_gamma_set,
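
vc4 embeds drm_crtc_state in its own vc4_crtc_state, so the generic drm_atomic_helper_crtc_reset() under-allocates the boot-time state: it kzallocs only sizeof(struct drm_crtc_state), and the driver later casts crtc->state to the subclass and touches fields past the end of that allocation. The custom vc4_crtc_reset() above allocates the full subclass. The underlying pattern, sketched (private members illustrative):

	struct vc4_crtc_state {
		struct drm_crtc_state base;	/* must be the first member */
		/* ... vc4-private per-CRTC commit state ... */
	};

A driver subclassing atomic state needs matching .reset, .atomic_duplicate_state and .atomic_destroy_state implementations; mixing the generic reset with subclass-aware accessors is the bug fixed here.
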
diff --git a/drivers/gpu/drm/vc4/vc4_dpi.c b/drivers/gpu/drm/vc4/vc4_dpi.c
index 71435796c710..c6d703903fd9 100644
--- a/drivers/gpu/drm/vc4/vc4_dpi.c
+++ b/drivers/gpu/drm/vc4/vc4_dpi.c
@@ -356,23 +356,14 @@ static const struct of_device_id vc4_dpi_dt_match[] = {
356 */ 356 */
357static struct drm_panel *vc4_dpi_get_panel(struct device *dev) 357static struct drm_panel *vc4_dpi_get_panel(struct device *dev)
358{ 358{
359 struct device_node *endpoint, *panel_node; 359 struct device_node *panel_node;
360 struct device_node *np = dev->of_node; 360 struct device_node *np = dev->of_node;
361 struct drm_panel *panel; 361 struct drm_panel *panel;
362 362
363 endpoint = of_graph_get_next_endpoint(np, NULL);
364 if (!endpoint) {
365 dev_err(dev, "no endpoint to fetch DPI panel\n");
366 return NULL;
367 }
368
369 /* don't proceed if we have an endpoint but no panel_node tied to it */ 363 /* don't proceed if we have an endpoint but no panel_node tied to it */
370 panel_node = of_graph_get_remote_port_parent(endpoint); 364 panel_node = of_graph_get_remote_node(np, 0, 0);
371 of_node_put(endpoint); 365 if (!panel_node)
372 if (!panel_node) {
373 dev_err(dev, "no valid panel node\n");
374 return NULL; 366 return NULL;
375 }
376 367
377 panel = of_drm_find_panel(panel_node); 368 panel = of_drm_find_panel(panel_node);
378 of_node_put(panel_node); 369 of_node_put(panel_node);
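
of_graph_get_remote_node(np, port, endpoint) collapses the endpoint walk into one call; roughly the open-coded sequence it replaces (sketch, checks elided):

	ep = of_graph_get_next_endpoint(np, NULL);	/* first endpoint */
	remote = of_graph_get_remote_port_parent(ep);	/* device on the far side */
	of_node_put(ep);

The dev_err() calls go away with the conversion; a missing panel now simply propagates as a NULL return for the caller to handle.
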
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index 93900a83dced..1328185bfd59 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -334,6 +334,7 @@ int virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev);
334void virtio_gpu_modeset_fini(struct virtio_gpu_device *vgdev); 334void virtio_gpu_modeset_fini(struct virtio_gpu_device *vgdev);
335 335
336/* virtio_gpu_plane.c */ 336/* virtio_gpu_plane.c */
337uint32_t virtio_gpu_translate_format(uint32_t drm_fourcc);
337struct drm_plane *virtio_gpu_plane_init(struct virtio_gpu_device *vgdev, 338struct drm_plane *virtio_gpu_plane_init(struct virtio_gpu_device *vgdev,
338 enum drm_plane_type type, 339 enum drm_plane_type type,
339 int index); 340 int index);
diff --git a/drivers/gpu/drm/virtio/virtgpu_fb.c b/drivers/gpu/drm/virtio/virtgpu_fb.c
index 9bfaef379469..33df067b11c1 100644
--- a/drivers/gpu/drm/virtio/virtgpu_fb.c
+++ b/drivers/gpu/drm/virtio/virtgpu_fb.c
@@ -231,63 +231,9 @@ static int virtio_gpufb_create(struct drm_fb_helper *helper,
231 mode_cmd.pitches[0] = mode_cmd.width * 4; 231 mode_cmd.pitches[0] = mode_cmd.width * 4;
232 mode_cmd.pixel_format = drm_mode_legacy_fb_format(32, 24); 232 mode_cmd.pixel_format = drm_mode_legacy_fb_format(32, 24);
233 233
234 switch (mode_cmd.pixel_format) { 234 format = virtio_gpu_translate_format(mode_cmd.pixel_format);
235#ifdef __BIG_ENDIAN 235 if (format == 0)
236 case DRM_FORMAT_XRGB8888:
237 format = VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM;
238 break;
239 case DRM_FORMAT_ARGB8888:
240 format = VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM;
241 break;
242 case DRM_FORMAT_BGRX8888:
243 format = VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM;
244 break;
245 case DRM_FORMAT_BGRA8888:
246 format = VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM;
247 break;
248 case DRM_FORMAT_RGBX8888:
249 format = VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM;
250 break;
251 case DRM_FORMAT_RGBA8888:
252 format = VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM;
253 break;
254 case DRM_FORMAT_XBGR8888:
255 format = VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM;
256 break;
257 case DRM_FORMAT_ABGR8888:
258 format = VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM;
259 break;
260#else
261 case DRM_FORMAT_XRGB8888:
262 format = VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM;
263 break;
264 case DRM_FORMAT_ARGB8888:
265 format = VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM;
266 break;
267 case DRM_FORMAT_BGRX8888:
268 format = VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM;
269 break;
270 case DRM_FORMAT_BGRA8888:
271 format = VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM;
272 break;
273 case DRM_FORMAT_RGBX8888:
274 format = VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM;
275 break;
276 case DRM_FORMAT_RGBA8888:
277 format = VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM;
278 break;
279 case DRM_FORMAT_XBGR8888:
280 format = VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM;
281 break;
282 case DRM_FORMAT_ABGR8888:
283 format = VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM;
284 break;
285#endif
286 default:
287 DRM_ERROR("failed to find virtio gpu format for %d\n",
288 mode_cmd.pixel_format);
289 return -EINVAL; 236 return -EINVAL;
290 }
291 237
292 size = mode_cmd.pitches[0] * mode_cmd.height; 238 size = mode_cmd.pitches[0] * mode_cmd.height;
293 obj = virtio_gpu_alloc_object(dev, size, false, true); 239 obj = virtio_gpu_alloc_object(dev, size, false, true);
diff --git a/drivers/gpu/drm/virtio/virtgpu_gem.c b/drivers/gpu/drm/virtio/virtgpu_gem.c
index 336a57fd6d5d..cc025d8fbe19 100644
--- a/drivers/gpu/drm/virtio/virtgpu_gem.c
+++ b/drivers/gpu/drm/virtio/virtgpu_gem.c
@@ -88,6 +88,7 @@ int virtio_gpu_mode_dumb_create(struct drm_file *file_priv,
88 int ret; 88 int ret;
89 uint32_t pitch; 89 uint32_t pitch;
90 uint32_t resid; 90 uint32_t resid;
91 uint32_t format;
91 92
92 pitch = args->width * ((args->bpp + 1) / 8); 93 pitch = args->width * ((args->bpp + 1) / 8);
93 args->size = pitch * args->height; 94 args->size = pitch * args->height;
@@ -98,9 +99,10 @@ int virtio_gpu_mode_dumb_create(struct drm_file *file_priv,
98 if (ret) 99 if (ret)
99 goto fail; 100 goto fail;
100 101
102 format = virtio_gpu_translate_format(DRM_FORMAT_XRGB8888);
101 virtio_gpu_resource_id_get(vgdev, &resid); 103 virtio_gpu_resource_id_get(vgdev, &resid);
102 virtio_gpu_cmd_create_resource(vgdev, resid, 104 virtio_gpu_cmd_create_resource(vgdev, resid, format,
103 2, args->width, args->height); 105 args->width, args->height);
104 106
105 /* attach the object to the resource */ 107 /* attach the object to the resource */
106 obj = gem_to_virtio_gpu_obj(gobj); 108 obj = gem_to_virtio_gpu_obj(gobj);
diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c
index 1483daebe057..6f66b7347cd0 100644
--- a/drivers/gpu/drm/virtio/virtgpu_object.c
+++ b/drivers/gpu/drm/virtio/virtgpu_object.c
@@ -81,8 +81,10 @@ int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
81 return -ENOMEM; 81 return -ENOMEM;
82 size = roundup(size, PAGE_SIZE); 82 size = roundup(size, PAGE_SIZE);
83 ret = drm_gem_object_init(vgdev->ddev, &bo->gem_base, size); 83 ret = drm_gem_object_init(vgdev->ddev, &bo->gem_base, size);
84 if (ret != 0) 84 if (ret != 0) {
85 kfree(bo);
85 return ret; 86 return ret;
87 }
86 bo->dumb = false; 88 bo->dumb = false;
87 virtio_gpu_init_ttm_placement(bo, pinned); 89 virtio_gpu_init_ttm_placement(bo, pinned);
88 90
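
This restores the usual ownership rule for allocate-then-init sequences: virtio_gpu_object_create() allocated bo itself, so when drm_gem_object_init() fails the object must be freed before returning, otherwise it leaks on every failed creation. In isolation:

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return -ENOMEM;
	ret = drm_gem_object_init(vgdev->ddev, &bo->gem_base, size);
	if (ret != 0) {
		kfree(bo);	/* we still own bo until init succeeds */
		return ret;
	}
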
diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c
index 1ff9c64c9ec0..adcdbd0abef6 100644
--- a/drivers/gpu/drm/virtio/virtgpu_plane.c
+++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
@@ -39,9 +39,81 @@ static const uint32_t virtio_gpu_formats[] = {
39}; 39};
40 40
41static const uint32_t virtio_gpu_cursor_formats[] = { 41static const uint32_t virtio_gpu_cursor_formats[] = {
42#ifdef __BIG_ENDIAN
43 DRM_FORMAT_BGRA8888,
44#else
42 DRM_FORMAT_ARGB8888, 45 DRM_FORMAT_ARGB8888,
46#endif
43}; 47};
44 48
49uint32_t virtio_gpu_translate_format(uint32_t drm_fourcc)
50{
51 uint32_t format;
52
53 switch (drm_fourcc) {
54#ifdef __BIG_ENDIAN
55 case DRM_FORMAT_XRGB8888:
56 format = VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM;
57 break;
58 case DRM_FORMAT_ARGB8888:
59 format = VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM;
60 break;
61 case DRM_FORMAT_BGRX8888:
62 format = VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM;
63 break;
64 case DRM_FORMAT_BGRA8888:
65 format = VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM;
66 break;
67 case DRM_FORMAT_RGBX8888:
68 format = VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM;
69 break;
70 case DRM_FORMAT_RGBA8888:
71 format = VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM;
72 break;
73 case DRM_FORMAT_XBGR8888:
74 format = VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM;
75 break;
76 case DRM_FORMAT_ABGR8888:
77 format = VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM;
78 break;
79#else
80 case DRM_FORMAT_XRGB8888:
81 format = VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM;
82 break;
83 case DRM_FORMAT_ARGB8888:
84 format = VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM;
85 break;
86 case DRM_FORMAT_BGRX8888:
87 format = VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM;
88 break;
89 case DRM_FORMAT_BGRA8888:
90 format = VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM;
91 break;
92 case DRM_FORMAT_RGBX8888:
93 format = VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM;
94 break;
95 case DRM_FORMAT_RGBA8888:
96 format = VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM;
97 break;
98 case DRM_FORMAT_XBGR8888:
99 format = VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM;
100 break;
101 case DRM_FORMAT_ABGR8888:
102 format = VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM;
103 break;
104#endif
105 default:
106 /*
107 * This should not happen, we handle everything listed
108 * in virtio_gpu_formats[].
109 */
110 format = 0;
111 break;
112 }
113 WARN_ON(format == 0);
114 return format;
115}
116
45static void virtio_gpu_plane_destroy(struct drm_plane *plane) 117static void virtio_gpu_plane_destroy(struct drm_plane *plane)
46{ 118{
47 drm_plane_cleanup(plane); 119 drm_plane_cleanup(plane);
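
With the fourcc-to-VIRTIO_GPU_FORMAT table centralized here, the fbdev and dumb-buffer paths shrink to a call plus a zero check, and the WARN_ON makes an unhandled fourcc loud in the log while callers still fail cleanly:

	format = virtio_gpu_translate_format(mode_cmd.pixel_format);
	if (format == 0)
		return -EINVAL;	/* fourcc has no host-side equivalent */

The #ifdef __BIG_ENDIAN split survives intact: the virtio-gpu formats are expressed in component order while DRM fourccs describe little-endian byte layouts, so the mapping flips with host endianness.
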
diff --git a/drivers/gpu/drm/virtio/virtgpu_ttm.c b/drivers/gpu/drm/virtio/virtgpu_ttm.c
index 70ec8ca8d9b1..4e8e27d50922 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ttm.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ttm.c
@@ -431,6 +431,7 @@ static struct ttm_bo_driver virtio_gpu_bo_driver = {
431 .verify_access = &virtio_gpu_verify_access, 431 .verify_access = &virtio_gpu_verify_access,
432 .io_mem_reserve = &virtio_gpu_ttm_io_mem_reserve, 432 .io_mem_reserve = &virtio_gpu_ttm_io_mem_reserve,
433 .io_mem_free = &virtio_gpu_ttm_io_mem_free, 433 .io_mem_free = &virtio_gpu_ttm_io_mem_free,
434 .io_mem_pfn = ttm_bo_default_io_mem_pfn,
434 .move_notify = &virtio_gpu_bo_move_notify, 435 .move_notify = &virtio_gpu_bo_move_notify,
435 .swap_notify = &virtio_gpu_bo_swap_notify, 436 .swap_notify = &virtio_gpu_bo_swap_notify,
436}; 437};
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index 4c7f24a67a2e..35bf781e418e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -859,4 +859,5 @@ struct ttm_bo_driver vmw_bo_driver = {
859 .fault_reserve_notify = &vmw_ttm_fault_reserve_notify, 859 .fault_reserve_notify = &vmw_ttm_fault_reserve_notify,
860 .io_mem_reserve = &vmw_ttm_io_mem_reserve, 860 .io_mem_reserve = &vmw_ttm_io_mem_reserve,
861 .io_mem_free = &vmw_ttm_io_mem_free, 861 .io_mem_free = &vmw_ttm_io_mem_free,
862 .io_mem_pfn = ttm_bo_default_io_mem_pfn,
862}; 863};
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index f6e936d90517..4a641555b960 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -254,7 +254,7 @@ module_param_named(restrict_iommu, vmw_restrict_iommu, int, 0600);
254MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages"); 254MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages");
255module_param_named(force_coherent, vmw_force_coherent, int, 0600); 255module_param_named(force_coherent, vmw_force_coherent, int, 0600);
256MODULE_PARM_DESC(restrict_dma_mask, "Restrict DMA mask to 44 bits with IOMMU"); 256MODULE_PARM_DESC(restrict_dma_mask, "Restrict DMA mask to 44 bits with IOMMU");
257module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, S_IRUSR | S_IWUSR); 257module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, 0600);
258MODULE_PARM_DESC(assume_16bpp, "Assume 16-bpp when filtering modes"); 258MODULE_PARM_DESC(assume_16bpp, "Assume 16-bpp when filtering modes");
259module_param_named(assume_16bpp, vmw_assume_16bpp, int, 0600); 259module_param_named(assume_16bpp, vmw_assume_16bpp, int, 0600);
260 260
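
No behavioral change here: S_IRUSR | S_IWUSR is 0400 | 0200, i.e. exactly 0600, and checkpatch now prefers the literal octal form for permission arguments since it is easier to audit at a glance:

	/* S_IRUSR | S_IWUSR == 0400 | 0200 == 0600 */
	module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, 0600);
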
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index 09e120d50e65..6f4cb4678cbc 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -418,6 +418,60 @@ static int vmw_fb_compute_depth(struct fb_var_screeninfo *var,
418 return 0; 418 return 0;
419} 419}
420 420
421static int vmwgfx_set_config_internal(struct drm_mode_set *set)
422{
423 struct drm_crtc *crtc = set->crtc;
424 struct drm_framebuffer *fb;
425 struct drm_crtc *tmp;
426 struct drm_modeset_acquire_ctx *ctx;
427 struct drm_device *dev = set->crtc->dev;
428 int ret;
429
430 ctx = dev->mode_config.acquire_ctx;
431
432restart:
433 /*
434 * NOTE: ->set_config can also disable other crtcs (if we steal all
435 * connectors from it), hence we need to refcount the fbs across all
436 * crtcs. Atomic modeset will have saner semantics ...
437 */
438 drm_for_each_crtc(tmp, dev)
439 tmp->primary->old_fb = tmp->primary->fb;
440
441 fb = set->fb;
442
443 ret = crtc->funcs->set_config(set, ctx);
444 if (ret == 0) {
445 crtc->primary->crtc = crtc;
446 crtc->primary->fb = fb;
447 }
448
449 drm_for_each_crtc(tmp, dev) {
450 if (tmp->primary->fb)
451 drm_framebuffer_get(tmp->primary->fb);
452 if (tmp->primary->old_fb)
453 drm_framebuffer_put(tmp->primary->old_fb);
454 tmp->primary->old_fb = NULL;
455 }
456
457 if (ret == -EDEADLK) {
458 dev->mode_config.acquire_ctx = NULL;
459
460retry_locking:
461 drm_modeset_backoff(ctx);
462
463 ret = drm_modeset_lock_all_ctx(dev, ctx);
464 if (ret)
465 goto retry_locking;
466
467 dev->mode_config.acquire_ctx = ctx;
468
469 goto restart;
470 }
471
472 return ret;
473}
474
421static int vmw_fb_kms_detach(struct vmw_fb_par *par, 475static int vmw_fb_kms_detach(struct vmw_fb_par *par,
422 bool detach_bo, 476 bool detach_bo,
423 bool unref_bo) 477 bool unref_bo)
@@ -436,7 +490,7 @@ static int vmw_fb_kms_detach(struct vmw_fb_par *par,
436 set.fb = NULL; 490 set.fb = NULL;
437 set.num_connectors = 0; 491 set.num_connectors = 0;
438 set.connectors = &par->con; 492 set.connectors = &par->con;
439 ret = drm_mode_set_config_internal(&set); 493 ret = vmwgfx_set_config_internal(&set);
440 if (ret) { 494 if (ret) {
441 DRM_ERROR("Could not unset a mode.\n"); 495 DRM_ERROR("Could not unset a mode.\n");
442 return ret; 496 return ret;
@@ -578,7 +632,7 @@ static int vmw_fb_set_par(struct fb_info *info)
578 set.num_connectors = 1; 632 set.num_connectors = 1;
579 set.connectors = &par->con; 633 set.connectors = &par->con;
580 634
581 ret = drm_mode_set_config_internal(&set); 635 ret = vmwgfx_set_config_internal(&set);
582 if (ret) 636 if (ret)
583 goto out_unlock; 637 goto out_unlock;
584 638
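
vmwgfx_set_config_internal() stands in for the old drm_mode_set_config_internal() now that ->set_config() takes a drm_modeset_acquire_ctx. Its -EDEADLK branch is the standard modeset-lock dance: drop everything, block on the contended lock in drm_modeset_backoff(), re-take all locks, retry. Stripped of the framebuffer refcounting, the core pattern looks like this (sketch):

	drm_modeset_acquire_init(&ctx, 0);
retry:
	ret = drm_modeset_lock_all_ctx(dev, &ctx);
	if (ret == -EDEADLK) {
		drm_modeset_backoff(&ctx);
		goto retry;
	}
	/* ... perform the update while holding the locks ... */
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

The retry_locking loop in the function above does the same thing against the ctx that fbdev already owns via dev->mode_config.acquire_ctx.
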
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index b399f03a988d..6b2708b4eafe 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -538,7 +538,7 @@ int vmw_fence_create(struct vmw_fence_manager *fman,
538 struct vmw_fence_obj **p_fence) 538 struct vmw_fence_obj **p_fence)
539{ 539{
540 struct vmw_fence_obj *fence; 540 struct vmw_fence_obj *fence;
541 int ret; 541 int ret;
542 542
543 fence = kzalloc(sizeof(*fence), GFP_KERNEL); 543 fence = kzalloc(sizeof(*fence), GFP_KERNEL);
544 if (unlikely(fence == NULL)) 544 if (unlikely(fence == NULL))
@@ -701,6 +701,41 @@ void vmw_fence_fifo_up(struct vmw_fence_manager *fman)
701} 701}
702 702
703 703
704/**
705 * vmw_fence_obj_lookup - Look up a user-space fence object
706 *
707 * @tfile: A struct ttm_object_file identifying the caller.
708 * @handle: A handle identifying the fence object.
709 * @return: A struct vmw_user_fence base ttm object on success or
710 * an error pointer on failure.
711 *
712 * The fence object is looked up and type-checked. The caller needs
713 * to have opened the fence object first, but since that happens on
714 * creation and fence objects aren't shareable, that's not an
715 * issue currently.
716 */
717static struct ttm_base_object *
718vmw_fence_obj_lookup(struct ttm_object_file *tfile, u32 handle)
719{
720 struct ttm_base_object *base = ttm_base_object_lookup(tfile, handle);
721
722 if (!base) {
723 pr_err("Invalid fence object handle 0x%08lx.\n",
724 (unsigned long)handle);
725 return ERR_PTR(-EINVAL);
726 }
727
728 if (base->refcount_release != vmw_user_fence_base_release) {
729 pr_err("Invalid fence object handle 0x%08lx.\n",
730 (unsigned long)handle);
731 ttm_base_object_unref(&base);
732 return ERR_PTR(-EINVAL);
733 }
734
735 return base;
736}
737
738
704int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data, 739int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data,
705 struct drm_file *file_priv) 740 struct drm_file *file_priv)
706{ 741{
@@ -726,12 +761,9 @@ int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data,
726 arg->kernel_cookie = jiffies + wait_timeout; 761 arg->kernel_cookie = jiffies + wait_timeout;
727 } 762 }
728 763
729 base = ttm_base_object_lookup(tfile, arg->handle); 764 base = vmw_fence_obj_lookup(tfile, arg->handle);
730 if (unlikely(base == NULL)) { 765 if (IS_ERR(base))
731 pr_err("Wait invalid fence object handle 0x%08lx\n", 766 return PTR_ERR(base);
732 (unsigned long)arg->handle);
733 return -EINVAL;
734 }
735 767
736 fence = &(container_of(base, struct vmw_user_fence, base)->fence); 768 fence = &(container_of(base, struct vmw_user_fence, base)->fence);
737 769
@@ -770,12 +802,9 @@ int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data,
770 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; 802 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
771 struct vmw_private *dev_priv = vmw_priv(dev); 803 struct vmw_private *dev_priv = vmw_priv(dev);
772 804
773 base = ttm_base_object_lookup(tfile, arg->handle); 805 base = vmw_fence_obj_lookup(tfile, arg->handle);
774 if (unlikely(base == NULL)) { 806 if (IS_ERR(base))
775 pr_err("Fence signaled invalid fence object handle 0x%08lx\n", 807 return PTR_ERR(base);
776 (unsigned long)arg->handle);
777 return -EINVAL;
778 }
779 808
780 fence = &(container_of(base, struct vmw_user_fence, base)->fence); 809 fence = &(container_of(base, struct vmw_user_fence, base)->fence);
781 fman = fman_from_fence(fence); 810 fman = fman_from_fence(fence);
@@ -1022,6 +1051,7 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
1022 (struct drm_vmw_fence_event_arg *) data; 1051 (struct drm_vmw_fence_event_arg *) data;
1023 struct vmw_fence_obj *fence = NULL; 1052 struct vmw_fence_obj *fence = NULL;
1024 struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv); 1053 struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
1054 struct ttm_object_file *tfile = vmw_fp->tfile;
1025 struct drm_vmw_fence_rep __user *user_fence_rep = 1055 struct drm_vmw_fence_rep __user *user_fence_rep =
1026 (struct drm_vmw_fence_rep __user *)(unsigned long) 1056 (struct drm_vmw_fence_rep __user *)(unsigned long)
1027 arg->fence_rep; 1057 arg->fence_rep;
@@ -1035,24 +1065,18 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
1035 */ 1065 */
1036 if (arg->handle) { 1066 if (arg->handle) {
1037 struct ttm_base_object *base = 1067 struct ttm_base_object *base =
1038 ttm_base_object_lookup_for_ref(dev_priv->tdev, 1068 vmw_fence_obj_lookup(tfile, arg->handle);
1039 arg->handle); 1069
1040 1070 if (IS_ERR(base))
1041 if (unlikely(base == NULL)) { 1071 return PTR_ERR(base);
1042 DRM_ERROR("Fence event invalid fence object handle " 1072
1043 "0x%08lx.\n",
1044 (unsigned long)arg->handle);
1045 return -EINVAL;
1046 }
1047 fence = &(container_of(base, struct vmw_user_fence, 1073 fence = &(container_of(base, struct vmw_user_fence,
1048 base)->fence); 1074 base)->fence);
1049 (void) vmw_fence_obj_reference(fence); 1075 (void) vmw_fence_obj_reference(fence);
1050 1076
1051 if (user_fence_rep != NULL) { 1077 if (user_fence_rep != NULL) {
1052 bool existed;
1053
1054 ret = ttm_ref_object_add(vmw_fp->tfile, base, 1078 ret = ttm_ref_object_add(vmw_fp->tfile, base,
1055 TTM_REF_USAGE, &existed); 1079 TTM_REF_USAGE, NULL, false);
1056 if (unlikely(ret != 0)) { 1080 if (unlikely(ret != 0)) {
1057 DRM_ERROR("Failed to reference a fence " 1081 DRM_ERROR("Failed to reference a fence "
1058 "object.\n"); 1082 "object.\n");
@@ -1095,8 +1119,7 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
1095 return 0; 1119 return 0;
1096out_no_create: 1120out_no_create:
1097 if (user_fence_rep != NULL) 1121 if (user_fence_rep != NULL)
1098 ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile, 1122 ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE);
1099 handle, TTM_REF_USAGE);
1100out_no_ref_obj: 1123out_no_ref_obj:
1101 vmw_fence_obj_unreference(&fence); 1124 vmw_fence_obj_unreference(&fence);
1102 return ret; 1125 return ret;
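
vmw_fence_obj_lookup() deduplicates three near-identical lookup-and-type-check sequences and switches the error convention from NULL to ERR_PTR(), so every call site collapses to:

	base = vmw_fence_obj_lookup(tfile, arg->handle);
	if (IS_ERR(base))
		return PTR_ERR(base);	/* helper already logged the handle */

Carrying the errno in the pointer keeps call sites to two lines and lets the helper distinguish failure causes later without touching its callers. Note the event ioctl also moves from ttm_base_object_lookup_for_ref() to the per-file tfile lookup, and its ttm_ref_object_add() call now passes a NULL existed pointer with require_existed = false.
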
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index b8c6a03c8c54..5ec24fd801cd 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -114,8 +114,6 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
114 param->value = dev_priv->has_dx; 114 param->value = dev_priv->has_dx;
115 break; 115 break;
116 default: 116 default:
117 DRM_ERROR("Illegal vmwgfx get param request: %d\n",
118 param->param);
119 return -EINVAL; 117 return -EINVAL;
120 } 118 }
121 119
@@ -186,7 +184,7 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
186 bool gb_objects = !!(dev_priv->capabilities & SVGA_CAP_GBOBJECTS); 184 bool gb_objects = !!(dev_priv->capabilities & SVGA_CAP_GBOBJECTS);
187 struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv); 185 struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
188 186
189 if (unlikely(arg->pad64 != 0)) { 187 if (unlikely(arg->pad64 != 0 || arg->max_size == 0)) {
190 DRM_ERROR("Illegal GET_3D_CAP argument.\n"); 188 DRM_ERROR("Illegal GET_3D_CAP argument.\n");
191 return -EINVAL; 189 return -EINVAL;
192 } 190 }
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 6078654d033b..ef9f3a2a4030 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -2026,7 +2026,8 @@ static int vmw_du_update_layout(struct vmw_private *dev_priv, unsigned num,
2026 2026
2027int vmw_du_crtc_gamma_set(struct drm_crtc *crtc, 2027int vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
2028 u16 *r, u16 *g, u16 *b, 2028 u16 *r, u16 *g, u16 *b,
2029 uint32_t size) 2029 uint32_t size,
2030 struct drm_modeset_acquire_ctx *ctx)
2030{ 2031{
2031 struct vmw_private *dev_priv = vmw_priv(crtc->dev); 2032 struct vmw_private *dev_priv = vmw_priv(crtc->dev);
2032 int i; 2033 int i;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index 0c226b2adea5..13f2f1d2818a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -254,7 +254,8 @@ void vmw_du_crtc_save(struct drm_crtc *crtc);
254void vmw_du_crtc_restore(struct drm_crtc *crtc); 254void vmw_du_crtc_restore(struct drm_crtc *crtc);
255int vmw_du_crtc_gamma_set(struct drm_crtc *crtc, 255int vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
256 u16 *r, u16 *g, u16 *b, 256 u16 *r, u16 *g, u16 *b,
257 uint32_t size); 257 uint32_t size,
258 struct drm_modeset_acquire_ctx *ctx);
258int vmw_du_crtc_cursor_set2(struct drm_crtc *crtc, struct drm_file *file_priv, 259int vmw_du_crtc_cursor_set2(struct drm_crtc *crtc, struct drm_file *file_priv,
259 uint32_t handle, uint32_t width, uint32_t height, 260 uint32_t handle, uint32_t width, uint32_t height,
260 int32_t hot_x, int32_t hot_y); 261 int32_t hot_x, int32_t hot_y);
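
Both the vc4 and vmwgfx prototype changes track the same core API update: drm_crtc_funcs.gamma_set now receives the drm_modeset_acquire_ctx that the legacy gamma ioctl already holds, so drivers can lock additional modeset objects against it instead of using the old lock-all helpers. A skeletal implementation under the new prototype (hypothetical driver, body elided):

	static int example_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g,
				     u16 *b, uint32_t size,
				     struct drm_modeset_acquire_ctx *ctx)
	{
		/* program the LUT; extra drm_modeset_lock() calls use ctx */
		return 0;
	}
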
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c b/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c
index 31fe32d8d65a..0d42a46521fc 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c
@@ -108,10 +108,10 @@ const struct dma_buf_ops vmw_prime_dmabuf_ops = {
108 .map_dma_buf = vmw_prime_map_dma_buf, 108 .map_dma_buf = vmw_prime_map_dma_buf,
109 .unmap_dma_buf = vmw_prime_unmap_dma_buf, 109 .unmap_dma_buf = vmw_prime_unmap_dma_buf,
110 .release = NULL, 110 .release = NULL,
111 .kmap = vmw_prime_dmabuf_kmap, 111 .map = vmw_prime_dmabuf_kmap,
112 .kmap_atomic = vmw_prime_dmabuf_kmap_atomic, 112 .map_atomic = vmw_prime_dmabuf_kmap_atomic,
113 .kunmap = vmw_prime_dmabuf_kunmap, 113 .unmap = vmw_prime_dmabuf_kunmap,
114 .kunmap_atomic = vmw_prime_dmabuf_kunmap_atomic, 114 .unmap_atomic = vmw_prime_dmabuf_kunmap_atomic,
115 .mmap = vmw_prime_dmabuf_mmap, 115 .mmap = vmw_prime_dmabuf_mmap,
116 .vmap = vmw_prime_dmabuf_vmap, 116 .vmap = vmw_prime_dmabuf_vmap,
117 .vunmap = vmw_prime_dmabuf_vunmap, 117 .vunmap = vmw_prime_dmabuf_vunmap,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index fa1037ec8e5f..7d591f653dfa 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -546,7 +546,7 @@ static int vmw_user_dmabuf_synccpu_grab(struct vmw_user_dma_buffer *user_bo,
546 return ret; 546 return ret;
547 547
548 ret = ttm_ref_object_add(tfile, &user_bo->prime.base, 548 ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
549 TTM_REF_SYNCCPU_WRITE, &existed); 549 TTM_REF_SYNCCPU_WRITE, &existed, false);
550 if (ret != 0 || existed) 550 if (ret != 0 || existed)
551 ttm_bo_synccpu_write_release(&user_bo->dma.base); 551 ttm_bo_synccpu_write_release(&user_bo->dma.base);
552 552
@@ -730,7 +730,7 @@ int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
730 730
731 *handle = user_bo->prime.base.hash.key; 731 *handle = user_bo->prime.base.hash.key;
732 return ttm_ref_object_add(tfile, &user_bo->prime.base, 732 return ttm_ref_object_add(tfile, &user_bo->prime.base,
733 TTM_REF_USAGE, NULL); 733 TTM_REF_USAGE, NULL, false);
734} 734}
735 735
736/** 736/**
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 41b9d20d6ae7..7681341fe32b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -713,11 +713,14 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
713 128; 713 128;
714 714
715 num_sizes = 0; 715 num_sizes = 0;
716 for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) 716 for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
717 if (req->mip_levels[i] > DRM_VMW_MAX_MIP_LEVELS)
718 return -EINVAL;
717 num_sizes += req->mip_levels[i]; 719 num_sizes += req->mip_levels[i];
720 }
718 721
719 if (num_sizes > DRM_VMW_MAX_SURFACE_FACES * 722 if (num_sizes > DRM_VMW_MAX_SURFACE_FACES * DRM_VMW_MAX_MIP_LEVELS ||
720 DRM_VMW_MAX_MIP_LEVELS) 723 num_sizes == 0)
721 return -EINVAL; 724 return -EINVAL;
722 725
723 size = vmw_user_surface_size + 128 + 726 size = vmw_user_surface_size + 128 +
@@ -890,17 +893,16 @@ vmw_surface_handle_reference(struct vmw_private *dev_priv,
890 uint32_t handle; 893 uint32_t handle;
891 struct ttm_base_object *base; 894 struct ttm_base_object *base;
892 int ret; 895 int ret;
896 bool require_exist = false;
893 897
894 if (handle_type == DRM_VMW_HANDLE_PRIME) { 898 if (handle_type == DRM_VMW_HANDLE_PRIME) {
895 ret = ttm_prime_fd_to_handle(tfile, u_handle, &handle); 899 ret = ttm_prime_fd_to_handle(tfile, u_handle, &handle);
896 if (unlikely(ret != 0)) 900 if (unlikely(ret != 0))
897 return ret; 901 return ret;
898 } else { 902 } else {
899 if (unlikely(drm_is_render_client(file_priv))) { 903 if (unlikely(drm_is_render_client(file_priv)))
900 DRM_ERROR("Render client refused legacy " 904 require_exist = true;
901 "surface reference.\n"); 905
902 return -EACCES;
903 }
904 if (ACCESS_ONCE(vmw_fpriv(file_priv)->locked_master)) { 906 if (ACCESS_ONCE(vmw_fpriv(file_priv)->locked_master)) {
905 DRM_ERROR("Locked master refused legacy " 907 DRM_ERROR("Locked master refused legacy "
906 "surface reference.\n"); 908 "surface reference.\n");
@@ -928,17 +930,14 @@ vmw_surface_handle_reference(struct vmw_private *dev_priv,
928 930
929 /* 931 /*
930 * Make sure the surface creator has the same 932 * Make sure the surface creator has the same
931 * authenticating master. 933 * authenticating master, or is already registered with us.
932 */ 934 */
933 if (drm_is_primary_client(file_priv) && 935 if (drm_is_primary_client(file_priv) &&
934 user_srf->master != file_priv->master) { 936 user_srf->master != file_priv->master)
935 DRM_ERROR("Trying to reference surface outside of" 937 require_exist = true;
936 " master domain.\n");
937 ret = -EACCES;
938 goto out_bad_resource;
939 }
940 938
941 ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL); 939 ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL,
940 require_exist);
942 if (unlikely(ret != 0)) { 941 if (unlikely(ret != 0)) {
943 DRM_ERROR("Could not add a reference to a surface.\n"); 942 DRM_ERROR("Could not add a reference to a surface.\n");
944 goto out_bad_resource; 943 goto out_bad_resource;
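
Two input-hardening fixes meet in vmw_surface_define_ioctl(): each user-supplied per-face mip count is clamped before being summed, so num_sizes cannot be pushed past its bound by a hostile request, and a zero total is rejected outright since the rest of the function assumes at least one size entry. Clamp-each-term-then-check-the-sum is the ordering that keeps the arithmetic safe:

	num_sizes = 0;
	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
		if (req->mip_levels[i] > DRM_VMW_MAX_MIP_LEVELS)
			return -EINVAL;		/* clamp before summing */
		num_sizes += req->mip_levels[i];
	}
	if (num_sizes == 0 ||
	    num_sizes > DRM_VMW_MAX_SURFACE_FACES * DRM_VMW_MAX_MIP_LEVELS)
		return -EINVAL;

The require_exist handling below it softens two hard rejections into "only allow referencing surfaces the client already opened", leaning on the ttm_ref_object_add() change earlier in this diff.
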
diff --git a/drivers/gpu/ipu-v3/Makefile b/drivers/gpu/ipu-v3/Makefile
index 1ab9bceee755..8cdf9e4ae772 100644
--- a/drivers/gpu/ipu-v3/Makefile
+++ b/drivers/gpu/ipu-v3/Makefile
@@ -2,4 +2,8 @@ obj-$(CONFIG_IMX_IPUV3_CORE) += imx-ipu-v3.o
2 2
3imx-ipu-v3-objs := ipu-common.o ipu-cpmem.o ipu-csi.o ipu-dc.o ipu-di.o \ 3imx-ipu-v3-objs := ipu-common.o ipu-cpmem.o ipu-csi.o ipu-dc.o ipu-di.o \
4 ipu-dp.o ipu-dmfc.o ipu-ic.o ipu-image-convert.o \ 4 ipu-dp.o ipu-dmfc.o ipu-ic.o ipu-image-convert.o \
5 ipu-pre.o ipu-prg.o ipu-smfc.o ipu-vdi.o 5 ipu-smfc.o ipu-vdi.o
6
7ifdef CONFIG_DRM
8 imx-ipu-v3-objs += ipu-pre.o ipu-prg.o
9endif
diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c
index 7aefccec31b1..16d556816b5f 100644
--- a/drivers/gpu/ipu-v3/ipu-common.c
+++ b/drivers/gpu/ipu-v3/ipu-common.c
@@ -1401,7 +1401,8 @@ static int ipu_probe(struct platform_device *pdev)
1401 1401
1402 ipu->id = of_alias_get_id(np, "ipu"); 1402 ipu->id = of_alias_get_id(np, "ipu");
1403 1403
1404 if (of_device_is_compatible(np, "fsl,imx6qp-ipu")) { 1404 if (of_device_is_compatible(np, "fsl,imx6qp-ipu") &&
1405 IS_ENABLED(CONFIG_DRM)) {
1405 ipu->prg_priv = ipu_prg_lookup_by_phandle(&pdev->dev, 1406 ipu->prg_priv = ipu_prg_lookup_by_phandle(&pdev->dev,
1406 "fsl,prg", ipu->id); 1407 "fsl,prg", ipu->id);
1407 if (!ipu->prg_priv) 1408 if (!ipu->prg_priv)
@@ -1538,8 +1539,10 @@ static struct platform_driver imx_ipu_driver = {
1538}; 1539};
1539 1540
1540static struct platform_driver * const drivers[] = { 1541static struct platform_driver * const drivers[] = {
1542#if IS_ENABLED(CONFIG_DRM)
1541 &ipu_pre_drv, 1543 &ipu_pre_drv,
1542 &ipu_prg_drv, 1544 &ipu_prg_drv,
1545#endif
1543 &imx_ipu_driver, 1546 &imx_ipu_driver,
1544}; 1547};
1545 1548
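
The PRE/PRG sub-drivers are DRM-only, so they are now compiled and registered only when DRM is configured. IS_ENABLED(CONFIG_DRM) is the right guard in C code: unlike a bare #ifdef it can sit inside an expression, and it is true for both =y and =m. The two forms used above, side by side:

	/* preprocessor form: drops the initializers entirely */
	#if IS_ENABLED(CONFIG_DRM)
		&ipu_pre_drv,
		&ipu_prg_drv,
	#endif

	/* expression form: an ordinary C condition */
	if (of_device_is_compatible(np, "fsl,imx6qp-ipu") &&
	    IS_ENABLED(CONFIG_DRM))
		/* ... look up the PRG via the fsl,prg phandle ... */;
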
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 3ceb4a2af381..d162f0dc76e3 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -819,8 +819,7 @@ static int hid_scan_report(struct hid_device *hid)
819 hid->group = HID_GROUP_WACOM; 819 hid->group = HID_GROUP_WACOM;
820 break; 820 break;
821 case USB_VENDOR_ID_SYNAPTICS: 821 case USB_VENDOR_ID_SYNAPTICS:
822 if (hid->group == HID_GROUP_GENERIC || 822 if (hid->group == HID_GROUP_GENERIC)
823 hid->group == HID_GROUP_MULTITOUCH_WIN_8)
824 if ((parser->scan_flags & HID_SCAN_FLAG_VENDOR_SPECIFIC) 823 if ((parser->scan_flags & HID_SCAN_FLAG_VENDOR_SPECIFIC)
825 && (parser->scan_flags & HID_SCAN_FLAG_GD_POINTER)) 824 && (parser->scan_flags & HID_SCAN_FLAG_GD_POINTER))
826 /* 825 /*
@@ -2096,6 +2095,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
2096 { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UGEE_TABLET_45) }, 2095 { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UGEE_TABLET_45) },
2097 { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_DRAWIMAGE_G3) }, 2096 { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_DRAWIMAGE_G3) },
2098 { HID_USB_DEVICE(USB_VENDOR_ID_UGTIZER, USB_DEVICE_ID_UGTIZER_TABLET_GP0610) }, 2097 { HID_USB_DEVICE(USB_VENDOR_ID_UGTIZER, USB_DEVICE_ID_UGTIZER_TABLET_GP0610) },
2098 { HID_USB_DEVICE(USB_VENDOR_ID_UGEE, USB_DEVICE_ID_UGEE_TABLET_EX07S) },
2099 { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SMARTJOY_PLUS) }, 2099 { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SMARTJOY_PLUS) },
2100 { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SUPER_JOY_BOX_3) }, 2100 { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SUPER_JOY_BOX_3) },
2101 { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_DUAL_USB_JOYPAD) }, 2101 { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_DUAL_USB_JOYPAD) },
@@ -2112,6 +2112,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
2112 { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SIRIUS_BATTERY_FREE_TABLET) }, 2112 { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SIRIUS_BATTERY_FREE_TABLET) },
2113 { HID_USB_DEVICE(USB_VENDOR_ID_X_TENSIONS, USB_DEVICE_ID_SPEEDLINK_VAD_CEZANNE) }, 2113 { HID_USB_DEVICE(USB_VENDOR_ID_X_TENSIONS, USB_DEVICE_ID_SPEEDLINK_VAD_CEZANNE) },
2114 { HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_XIN_MO_DUAL_ARCADE) }, 2114 { HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_XIN_MO_DUAL_ARCADE) },
2115 { HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_THT_2P_ARCADE) },
2115 { HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0005) }, 2116 { HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0005) },
2116 { HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0030) }, 2117 { HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0030) },
2117 { HID_USB_DEVICE(USB_VENDOR_ID_ZYDACRON, USB_DEVICE_ID_ZYDACRON_REMOTE_CONTROL) }, 2118 { HID_USB_DEVICE(USB_VENDOR_ID_ZYDACRON, USB_DEVICE_ID_ZYDACRON_REMOTE_CONTROL) },
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 0e2e7c571d22..b26c030926c1 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -1028,6 +1028,9 @@
1028#define USB_DEVICE_ID_UGEE_TABLET_45 0x0045 1028#define USB_DEVICE_ID_UGEE_TABLET_45 0x0045
1029#define USB_DEVICE_ID_YIYNOVA_TABLET 0x004d 1029#define USB_DEVICE_ID_YIYNOVA_TABLET 0x004d
1030 1030
1031#define USB_VENDOR_ID_UGEE 0x28bd
1032#define USB_DEVICE_ID_UGEE_TABLET_EX07S 0x0071
1033
1031#define USB_VENDOR_ID_UNITEC 0x227d 1034#define USB_VENDOR_ID_UNITEC 0x227d
1032#define USB_DEVICE_ID_UNITEC_USB_TOUCH_0709 0x0709 1035#define USB_DEVICE_ID_UNITEC_USB_TOUCH_0709 0x0709
1033#define USB_DEVICE_ID_UNITEC_USB_TOUCH_0A19 0x0a19 1036#define USB_DEVICE_ID_UNITEC_USB_TOUCH_0A19 0x0a19
@@ -1082,6 +1085,7 @@
1082 1085
1083#define USB_VENDOR_ID_XIN_MO 0x16c0 1086#define USB_VENDOR_ID_XIN_MO 0x16c0
1084#define USB_DEVICE_ID_XIN_MO_DUAL_ARCADE 0x05e1 1087#define USB_DEVICE_ID_XIN_MO_DUAL_ARCADE 0x05e1
1088#define USB_DEVICE_ID_THT_2P_ARCADE 0x75e1
1085 1089
1086#define USB_VENDOR_ID_XIROKU 0x1477 1090#define USB_VENDOR_ID_XIROKU 0x1477
1087#define USB_DEVICE_ID_XIROKU_SPX 0x1006 1091#define USB_DEVICE_ID_XIROKU_SPX 0x1006
diff --git a/drivers/hid/hid-uclogic.c b/drivers/hid/hid-uclogic.c
index 1509d7287ff3..e3e6e5c893cc 100644
--- a/drivers/hid/hid-uclogic.c
+++ b/drivers/hid/hid-uclogic.c
@@ -977,6 +977,7 @@ static int uclogic_probe(struct hid_device *hdev,
977 } 977 }
978 break; 978 break;
979 case USB_DEVICE_ID_UGTIZER_TABLET_GP0610: 979 case USB_DEVICE_ID_UGTIZER_TABLET_GP0610:
980 case USB_DEVICE_ID_UGEE_TABLET_EX07S:
980 /* If this is the pen interface */ 981 /* If this is the pen interface */
981 if (intf->cur_altsetting->desc.bInterfaceNumber == 1) { 982 if (intf->cur_altsetting->desc.bInterfaceNumber == 1) {
982 rc = uclogic_tablet_enable(hdev); 983 rc = uclogic_tablet_enable(hdev);
@@ -1069,6 +1070,7 @@ static const struct hid_device_id uclogic_devices[] = {
1069 { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UGEE_TABLET_45) }, 1070 { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UGEE_TABLET_45) },
1070 { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_DRAWIMAGE_G3) }, 1071 { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_DRAWIMAGE_G3) },
1071 { HID_USB_DEVICE(USB_VENDOR_ID_UGTIZER, USB_DEVICE_ID_UGTIZER_TABLET_GP0610) }, 1072 { HID_USB_DEVICE(USB_VENDOR_ID_UGTIZER, USB_DEVICE_ID_UGTIZER_TABLET_GP0610) },
1073 { HID_USB_DEVICE(USB_VENDOR_ID_UGEE, USB_DEVICE_ID_UGEE_TABLET_EX07S) },
1072 { } 1074 { }
1073}; 1075};
1074MODULE_DEVICE_TABLE(hid, uclogic_devices); 1076MODULE_DEVICE_TABLE(hid, uclogic_devices);
diff --git a/drivers/hid/hid-xinmo.c b/drivers/hid/hid-xinmo.c
index 7df5227a7e61..9ad7731d2e10 100644
--- a/drivers/hid/hid-xinmo.c
+++ b/drivers/hid/hid-xinmo.c
@@ -46,6 +46,7 @@ static int xinmo_event(struct hid_device *hdev, struct hid_field *field,
46 46
47static const struct hid_device_id xinmo_devices[] = { 47static const struct hid_device_id xinmo_devices[] = {
48 { HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_XIN_MO_DUAL_ARCADE) }, 48 { HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_XIN_MO_DUAL_ARCADE) },
49 { HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_THT_2P_ARCADE) },
49 { } 50 { }
50}; 51};
51 52
diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
index 994bddc55b82..e2666ef84dc1 100644
--- a/drivers/hid/wacom_sys.c
+++ b/drivers/hid/wacom_sys.c
@@ -2165,6 +2165,14 @@ static int wacom_parse_and_register(struct wacom *wacom, bool wireless)
2165 2165
2166 wacom_update_name(wacom, wireless ? " (WL)" : ""); 2166 wacom_update_name(wacom, wireless ? " (WL)" : "");
2167 2167
2168 /* pen only Bamboo neither support touch nor pad */
2169 if ((features->type == BAMBOO_PEN) &&
2170 ((features->device_type & WACOM_DEVICETYPE_TOUCH) ||
2171 (features->device_type & WACOM_DEVICETYPE_PAD))) {
2172 error = -ENODEV;
2173 goto fail;
2174 }
2175
2168 error = wacom_add_shared_data(hdev); 2176 error = wacom_add_shared_data(hdev);
2169 if (error) 2177 if (error)
2170 goto fail; 2178 goto fail;
@@ -2208,14 +2216,8 @@ static int wacom_parse_and_register(struct wacom *wacom, bool wireless)
2208 /* touch only Bamboo doesn't support pen */ 2216 /* touch only Bamboo doesn't support pen */
2209 if ((features->type == BAMBOO_TOUCH) && 2217 if ((features->type == BAMBOO_TOUCH) &&
2210 (features->device_type & WACOM_DEVICETYPE_PEN)) { 2218 (features->device_type & WACOM_DEVICETYPE_PEN)) {
2211 error = -ENODEV; 2219 cancel_delayed_work_sync(&wacom->init_work);
2212 goto fail_quirks; 2220 _wacom_query_tablet_data(wacom);
2213 }
2214
2215 /* pen only Bamboo neither support touch nor pad */
2216 if ((features->type == BAMBOO_PEN) &&
2217 ((features->device_type & WACOM_DEVICETYPE_TOUCH) ||
2218 (features->device_type & WACOM_DEVICETYPE_PAD))) {
2219 error = -ENODEV; 2221 error = -ENODEV;
2220 goto fail_quirks; 2222 goto fail_quirks;
2221 } 2223 }
diff --git a/drivers/i2c/muxes/i2c-mux-pca954x.c b/drivers/i2c/muxes/i2c-mux-pca954x.c
index dfc1c0e37c40..ad31d21da316 100644
--- a/drivers/i2c/muxes/i2c-mux-pca954x.c
+++ b/drivers/i2c/muxes/i2c-mux-pca954x.c
@@ -35,7 +35,6 @@
35 * warranty of any kind, whether express or implied. 35 * warranty of any kind, whether express or implied.
36 */ 36 */
37 37
38#include <linux/acpi.h>
39#include <linux/device.h> 38#include <linux/device.h>
40#include <linux/gpio/consumer.h> 39#include <linux/gpio/consumer.h>
41#include <linux/i2c.h> 40#include <linux/i2c.h>
@@ -117,6 +116,10 @@ static const struct chip_desc chips[] = {
117 .has_irq = 1, 116 .has_irq = 1,
118 .muxtype = pca954x_isswi, 117 .muxtype = pca954x_isswi,
119 }, 118 },
119 [pca_9546] = {
120 .nchans = 4,
121 .muxtype = pca954x_isswi,
122 },
120 [pca_9547] = { 123 [pca_9547] = {
121 .nchans = 8, 124 .nchans = 8,
122 .enable = 0x8, 125 .enable = 0x8,
@@ -134,28 +137,13 @@ static const struct i2c_device_id pca954x_id[] = {
134 { "pca9543", pca_9543 }, 137 { "pca9543", pca_9543 },
135 { "pca9544", pca_9544 }, 138 { "pca9544", pca_9544 },
136 { "pca9545", pca_9545 }, 139 { "pca9545", pca_9545 },
137 { "pca9546", pca_9545 }, 140 { "pca9546", pca_9546 },
138 { "pca9547", pca_9547 }, 141 { "pca9547", pca_9547 },
139 { "pca9548", pca_9548 }, 142 { "pca9548", pca_9548 },
140 { } 143 { }
141}; 144};
142MODULE_DEVICE_TABLE(i2c, pca954x_id); 145MODULE_DEVICE_TABLE(i2c, pca954x_id);
143 146
144#ifdef CONFIG_ACPI
145static const struct acpi_device_id pca954x_acpi_ids[] = {
146 { .id = "PCA9540", .driver_data = pca_9540 },
147 { .id = "PCA9542", .driver_data = pca_9542 },
148 { .id = "PCA9543", .driver_data = pca_9543 },
149 { .id = "PCA9544", .driver_data = pca_9544 },
150 { .id = "PCA9545", .driver_data = pca_9545 },
151 { .id = "PCA9546", .driver_data = pca_9545 },
152 { .id = "PCA9547", .driver_data = pca_9547 },
153 { .id = "PCA9548", .driver_data = pca_9548 },
154 { }
155};
156MODULE_DEVICE_TABLE(acpi, pca954x_acpi_ids);
157#endif
158
159#ifdef CONFIG_OF 147#ifdef CONFIG_OF
160static const struct of_device_id pca954x_of_match[] = { 148static const struct of_device_id pca954x_of_match[] = {
161 { .compatible = "nxp,pca9540", .data = &chips[pca_9540] }, 149 { .compatible = "nxp,pca9540", .data = &chips[pca_9540] },
@@ -393,17 +381,8 @@ static int pca954x_probe(struct i2c_client *client,
393 match = of_match_device(of_match_ptr(pca954x_of_match), &client->dev); 381 match = of_match_device(of_match_ptr(pca954x_of_match), &client->dev);
394 if (match) 382 if (match)
395 data->chip = of_device_get_match_data(&client->dev); 383 data->chip = of_device_get_match_data(&client->dev);
396 else if (id) 384 else
397 data->chip = &chips[id->driver_data]; 385 data->chip = &chips[id->driver_data];
398 else {
399 const struct acpi_device_id *acpi_id;
400
401 acpi_id = acpi_match_device(ACPI_PTR(pca954x_acpi_ids),
402 &client->dev);
403 if (!acpi_id)
404 return -ENODEV;
405 data->chip = &chips[acpi_id->driver_data];
406 }
407 386
408 data->last_chan = 0; /* force the first selection */ 387 data->last_chan = 0; /* force the first selection */
409 388
@@ -492,7 +471,6 @@ static struct i2c_driver pca954x_driver = {
492 .name = "pca954x", 471 .name = "pca954x",
493 .pm = &pca954x_pm, 472 .pm = &pca954x_pm,
494 .of_match_table = of_match_ptr(pca954x_of_match), 473 .of_match_table = of_match_ptr(pca954x_of_match),
495 .acpi_match_table = ACPI_PTR(pca954x_acpi_ids),
496 }, 474 },
497 .probe = pca954x_probe, 475 .probe = pca954x_probe,
498 .remove = pca954x_remove, 476 .remove = pca954x_remove,
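
The id-table fix matters because pca9546 had been borrowing the pca_9545 descriptor, whose .has_irq advertises an interrupt line the 9546 does not have; the new entry keeps the four channels and switch type but drops the IRQ claim:

	[pca_9546] = {
		.nchans  = 4,
		.muxtype = pca954x_isswi,	/* 4-channel switch, no IRQ */
	},

The same diff drops the ACPI match table, leaving OF and legacy I2C device IDs as the supported match paths, which is why the probe fallback no longer needs an acpi_match_device() branch.
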
diff --git a/drivers/iio/accel/hid-sensor-accel-3d.c b/drivers/iio/accel/hid-sensor-accel-3d.c
index ca5759c0c318..43a6cb078193 100644
--- a/drivers/iio/accel/hid-sensor-accel-3d.c
+++ b/drivers/iio/accel/hid-sensor-accel-3d.c
@@ -370,10 +370,12 @@ static int hid_accel_3d_probe(struct platform_device *pdev)
370 name = "accel_3d"; 370 name = "accel_3d";
371 channel_spec = accel_3d_channels; 371 channel_spec = accel_3d_channels;
372 channel_size = sizeof(accel_3d_channels); 372 channel_size = sizeof(accel_3d_channels);
373 indio_dev->num_channels = ARRAY_SIZE(accel_3d_channels);
373 } else { 374 } else {
374 name = "gravity"; 375 name = "gravity";
375 channel_spec = gravity_channels; 376 channel_spec = gravity_channels;
376 channel_size = sizeof(gravity_channels); 377 channel_size = sizeof(gravity_channels);
378 indio_dev->num_channels = ARRAY_SIZE(gravity_channels);
377 } 379 }
378 ret = hid_sensor_parse_common_attributes(hsdev, hsdev->usage, 380 ret = hid_sensor_parse_common_attributes(hsdev, hsdev->usage,
379 &accel_state->common_attributes); 381 &accel_state->common_attributes);
@@ -395,7 +397,6 @@ static int hid_accel_3d_probe(struct platform_device *pdev)
395 goto error_free_dev_mem; 397 goto error_free_dev_mem;
396 } 398 }
397 399
398 indio_dev->num_channels = ARRAY_SIZE(accel_3d_channels);
399 indio_dev->dev.parent = &pdev->dev; 400 indio_dev->dev.parent = &pdev->dev;
400 indio_dev->info = &accel_3d_info; 401 indio_dev->info = &accel_3d_info;
401 indio_dev->name = name; 402 indio_dev->name = name;
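
The probe fix pairs num_channels with whichever channel array was actually selected; previously it was set after the branch, unconditionally to ARRAY_SIZE(accel_3d_channels), which is wrong when the device probes as the gravity sensor. The invariant to preserve, sketched (is_gravity is illustrative):

	/* channels and num_channels must describe the same array */
	indio_dev->channels = channel_spec;
	indio_dev->num_channels = is_gravity ? ARRAY_SIZE(gravity_channels)
					     : ARRAY_SIZE(accel_3d_channels);
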
diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c
index d6c372bb433b..c17596f7ed2c 100644
--- a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c
+++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c
@@ -61,7 +61,7 @@ static int cros_ec_sensors_read(struct iio_dev *indio_dev,
61 ret = st->core.read_ec_sensors_data(indio_dev, 1 << idx, &data); 61 ret = st->core.read_ec_sensors_data(indio_dev, 1 << idx, &data);
62 if (ret < 0) 62 if (ret < 0)
63 break; 63 break;
64 64 ret = IIO_VAL_INT;
65 *val = data; 65 *val = data;
66 break; 66 break;
67 case IIO_CHAN_INFO_CALIBBIAS: 67 case IIO_CHAN_INFO_CALIBBIAS:
@@ -76,7 +76,7 @@ static int cros_ec_sensors_read(struct iio_dev *indio_dev,
76 for (i = CROS_EC_SENSOR_X; i < CROS_EC_SENSOR_MAX_AXIS; i++) 76 for (i = CROS_EC_SENSOR_X; i < CROS_EC_SENSOR_MAX_AXIS; i++)
77 st->core.calib[i] = 77 st->core.calib[i] =
78 st->core.resp->sensor_offset.offset[i]; 78 st->core.resp->sensor_offset.offset[i];
79 79 ret = IIO_VAL_INT;
80 *val = st->core.calib[idx]; 80 *val = st->core.calib[idx];
81 break; 81 break;
82 case IIO_CHAN_INFO_SCALE: 82 case IIO_CHAN_INFO_SCALE:
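
An IIO read_raw() callback communicates the value's type through its return code; filling *val alone is not enough. Here ret still held the 0 from the EC transfer, the core never saw IIO_VAL_INT, and the sysfs attribute effectively read back empty. The corrected shape of such a case (read_hw() is a hypothetical transport call):

	case IIO_CHAN_INFO_RAW:
		ret = read_hw(&data);
		if (ret < 0)
			break;
		*val = data;
		ret = IIO_VAL_INT;	/* tell the core *val is a plain int */
		break;
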
diff --git a/drivers/iio/common/hid-sensors/hid-sensor-attributes.c b/drivers/iio/common/hid-sensors/hid-sensor-attributes.c
index 7afdac42ed42..01e02b9926d4 100644
--- a/drivers/iio/common/hid-sensors/hid-sensor-attributes.c
+++ b/drivers/iio/common/hid-sensors/hid-sensor-attributes.c
@@ -379,6 +379,8 @@ int hid_sensor_parse_common_attributes(struct hid_sensor_hub_device *hsdev,
379{ 379{
380 380
381 struct hid_sensor_hub_attribute_info timestamp; 381 struct hid_sensor_hub_attribute_info timestamp;
382 s32 value;
383 int ret;
382 384
383 hid_sensor_get_reporting_interval(hsdev, usage_id, st); 385 hid_sensor_get_reporting_interval(hsdev, usage_id, st);
384 386
@@ -417,6 +419,14 @@ int hid_sensor_parse_common_attributes(struct hid_sensor_hub_device *hsdev,
417 st->sensitivity.index, st->sensitivity.report_id, 419 st->sensitivity.index, st->sensitivity.report_id,
418 timestamp.index, timestamp.report_id); 420 timestamp.index, timestamp.report_id);
419 421
422 ret = sensor_hub_get_feature(hsdev,
423 st->power_state.report_id,
424 st->power_state.index, sizeof(value), &value);
425 if (ret < 0)
426 return ret;
427 if (value < 0)
428 return -EINVAL;
429
420 return 0; 430 return 0;
421} 431}
422EXPORT_SYMBOL(hid_sensor_parse_common_attributes); 432EXPORT_SYMBOL(hid_sensor_parse_common_attributes);
diff --git a/drivers/iio/gyro/bmg160_core.c b/drivers/iio/gyro/bmg160_core.c
index f7fcfa886f72..821919dd245b 100644
--- a/drivers/iio/gyro/bmg160_core.c
+++ b/drivers/iio/gyro/bmg160_core.c
@@ -27,6 +27,7 @@
27#include <linux/iio/trigger_consumer.h> 27#include <linux/iio/trigger_consumer.h>
28#include <linux/iio/triggered_buffer.h> 28#include <linux/iio/triggered_buffer.h>
29#include <linux/regmap.h> 29#include <linux/regmap.h>
30#include <linux/delay.h>
30#include "bmg160.h" 31#include "bmg160.h"
31 32
32#define BMG160_IRQ_NAME "bmg160_event" 33#define BMG160_IRQ_NAME "bmg160_event"
@@ -52,6 +53,9 @@
52#define BMG160_DEF_BW 100 53#define BMG160_DEF_BW 100
53#define BMG160_REG_PMU_BW_RES BIT(7) 54#define BMG160_REG_PMU_BW_RES BIT(7)
54 55
56#define BMG160_GYRO_REG_RESET 0x14
57#define BMG160_GYRO_RESET_VAL 0xb6
58
55#define BMG160_REG_INT_MAP_0 0x17 59#define BMG160_REG_INT_MAP_0 0x17
56#define BMG160_INT_MAP_0_BIT_ANY BIT(1) 60#define BMG160_INT_MAP_0_BIT_ANY BIT(1)
57 61
@@ -236,6 +240,14 @@ static int bmg160_chip_init(struct bmg160_data *data)
236 int ret; 240 int ret;
237 unsigned int val; 241 unsigned int val;
238 242
243 /*
244 * Reset chip to get it in a known good state. A delay of 30ms after
245 * reset is required according to the datasheet.
246 */
247 regmap_write(data->regmap, BMG160_GYRO_REG_RESET,
248 BMG160_GYRO_RESET_VAL);
249 usleep_range(30000, 30700);
250
239 ret = regmap_read(data->regmap, BMG160_REG_CHIP_ID, &val); 251 ret = regmap_read(data->regmap, BMG160_REG_CHIP_ID, &val);
240 if (ret < 0) { 252 if (ret < 0) {
241 dev_err(dev, "Error reading reg_chip_id\n"); 253 dev_err(dev, "Error reading reg_chip_id\n");
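
The gyro is now hard-reset before the chip-ID read so probe always starts from power-on register defaults, e.g. after a warm reboot left it configured with stale settings. The 30 ms figure is the datasheet's post-reset start-up time, and usleep_range() with a little slack is the idiomatic wait in process context:

	regmap_write(data->regmap, BMG160_GYRO_REG_RESET,
		     BMG160_GYRO_RESET_VAL);	/* 0xb6: soft reset */
	usleep_range(30000, 30700);		/* datasheet: 30 ms start-up */
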
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index d18ded45bedd..3ff91e02fee3 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -610,10 +610,9 @@ static ssize_t __iio_format_value(char *buf, size_t len, unsigned int type,
610 tmp0 = (int)div_s64_rem(tmp, 1000000000, &tmp1); 610 tmp0 = (int)div_s64_rem(tmp, 1000000000, &tmp1);
611 return snprintf(buf, len, "%d.%09u", tmp0, abs(tmp1)); 611 return snprintf(buf, len, "%d.%09u", tmp0, abs(tmp1));
612 case IIO_VAL_FRACTIONAL_LOG2: 612 case IIO_VAL_FRACTIONAL_LOG2:
613 tmp = (s64)vals[0] * 1000000000LL >> vals[1]; 613 tmp = shift_right((s64)vals[0] * 1000000000LL, vals[1]);
614 tmp1 = do_div(tmp, 1000000000LL); 614 tmp0 = (int)div_s64_rem(tmp, 1000000000LL, &tmp1);
615 tmp0 = tmp; 615 return snprintf(buf, len, "%d.%09u", tmp0, abs(tmp1));
616 return snprintf(buf, len, "%d.%09u", tmp0, tmp1);
617 case IIO_VAL_INT_MULTIPLE: 616 case IIO_VAL_INT_MULTIPLE:
618 { 617 {
619 int i; 618 int i;
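
The IIO_VAL_FRACTIONAL_LOG2 rewrite fixes signed arithmetic twice over: right-shifting a negative s64 is implementation-defined, which shift_right() handles, and do_div() treats its dividend as unsigned, so negative values came out mangled; div_s64_rem() keeps the sign. Worked example for vals = {-3, 1}, i.e. -3/2:

	s64 tmp = shift_right((s64)-3 * 1000000000LL, 1);	/* -1500000000 */
	s32 rem;
	int whole = (int)div_s64_rem(tmp, 1000000000LL, &rem);
	/* whole = -1, rem = -500000000; "%d.%09u" with whole and
	 * abs(rem) prints -1.500000000 */
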
diff --git a/drivers/iio/pressure/st_pressure_core.c b/drivers/iio/pressure/st_pressure_core.c
index 5f2680855552..fd0edca0e656 100644
--- a/drivers/iio/pressure/st_pressure_core.c
+++ b/drivers/iio/pressure/st_pressure_core.c
@@ -457,6 +457,7 @@ static const struct st_sensor_settings st_press_sensors_settings[] = {
457 .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, 457 .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
458 }, 458 },
459 .multi_read_bit = true, 459 .multi_read_bit = true,
460 .bootime = 2,
460 }, 461 },
461}; 462};
462 463
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 91cbe86b25c8..fcbed35e95a8 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -817,6 +817,7 @@ isert_post_recvm(struct isert_conn *isert_conn, u32 count)
 		rx_wr->sg_list = &rx_desc->rx_sg;
 		rx_wr->num_sge = 1;
 		rx_wr->next = rx_wr + 1;
+		rx_desc->in_use = false;
 	}
 	rx_wr--;
 	rx_wr->next = NULL; /* mark end of work requests list */
@@ -835,6 +836,15 @@ isert_post_recv(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc)
 	struct ib_recv_wr *rx_wr_failed, rx_wr;
 	int ret;
 
+	if (!rx_desc->in_use) {
+		/*
+		 * if the descriptor is not in-use we already reposted it
+		 * for recv, so just silently return
+		 */
+		return 0;
+	}
+
+	rx_desc->in_use = false;
 	rx_wr.wr_cqe = &rx_desc->rx_cqe;
 	rx_wr.sg_list = &rx_desc->rx_sg;
 	rx_wr.num_sge = 1;
@@ -1397,6 +1407,8 @@ isert_recv_done(struct ib_cq *cq, struct ib_wc *wc)
 		return;
 	}
 
+	rx_desc->in_use = true;
+
 	ib_dma_sync_single_for_cpu(ib_dev, rx_desc->dma_addr,
 			ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
 
@@ -1659,10 +1671,23 @@ isert_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc)
 		ret = isert_check_pi_status(cmd, isert_cmd->rw.sig->sig_mr);
 	isert_rdma_rw_ctx_destroy(isert_cmd, isert_conn);
 
-	if (ret)
-		transport_send_check_condition_and_sense(cmd, cmd->pi_err, 0);
-	else
-		isert_put_response(isert_conn->conn, isert_cmd->iscsi_cmd);
+	if (ret) {
+		/*
+		 * transport_generic_request_failure() expects to have
+		 * plus two references to handle queue-full, so re-add
+		 * one here as target-core will have already dropped
+		 * it after the first isert_put_datain() callback.
+		 */
+		kref_get(&cmd->cmd_kref);
+		transport_generic_request_failure(cmd, cmd->pi_err);
+	} else {
+		/*
+		 * XXX: isert_put_response() failure is not retried.
+		 */
+		ret = isert_put_response(isert_conn->conn, isert_cmd->iscsi_cmd);
+		if (ret)
+			pr_warn_ratelimited("isert_put_response() ret: %d\n", ret);
+	}
 }
 
 static void
@@ -1699,13 +1724,15 @@ isert_rdma_read_done(struct ib_cq *cq, struct ib_wc *wc)
 	cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
 	spin_unlock_bh(&cmd->istate_lock);
 
-	if (ret) {
-		target_put_sess_cmd(se_cmd);
-		transport_send_check_condition_and_sense(se_cmd,
-							 se_cmd->pi_err, 0);
-	} else {
+	/*
+	 * transport_generic_request_failure() will drop the extra
+	 * se_cmd->cmd_kref reference after T10-PI error, and handle
+	 * any non-zero ->queue_status() callback error retries.
+	 */
+	if (ret)
+		transport_generic_request_failure(se_cmd, se_cmd->pi_err);
+	else
 		target_execute_cmd(se_cmd);
-	}
 }
 
 static void
@@ -2171,26 +2198,28 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
 		chain_wr = &isert_cmd->tx_desc.send_wr;
 	}
 
-	isert_rdma_rw_ctx_post(isert_cmd, isert_conn, cqe, chain_wr);
-	isert_dbg("Cmd: %p posted RDMA_WRITE for iSER Data READ\n", isert_cmd);
-	return 1;
+	rc = isert_rdma_rw_ctx_post(isert_cmd, isert_conn, cqe, chain_wr);
+	isert_dbg("Cmd: %p posted RDMA_WRITE for iSER Data READ rc: %d\n",
+		  isert_cmd, rc);
+	return rc;
 }
 
 static int
 isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
 {
 	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
+	int ret;
 
 	isert_dbg("Cmd: %p RDMA_READ data_length: %u write_data_done: %u\n",
 		  isert_cmd, cmd->se_cmd.data_length, cmd->write_data_done);
 
 	isert_cmd->tx_desc.tx_cqe.done = isert_rdma_read_done;
-	isert_rdma_rw_ctx_post(isert_cmd, conn->context,
-			       &isert_cmd->tx_desc.tx_cqe, NULL);
+	ret = isert_rdma_rw_ctx_post(isert_cmd, conn->context,
+				     &isert_cmd->tx_desc.tx_cqe, NULL);
 
-	isert_dbg("Cmd: %p posted RDMA_READ memory for ISER Data WRITE\n",
-		  isert_cmd);
-	return 0;
+	isert_dbg("Cmd: %p posted RDMA_READ memory for ISER Data WRITE rc: %d\n",
+		  isert_cmd, ret);
+	return ret;
 }
 
 static int
diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
index c02ada57d7f5..87d994de8c91 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.h
+++ b/drivers/infiniband/ulp/isert/ib_isert.h
@@ -60,7 +60,7 @@
 
 #define ISER_RX_PAD_SIZE	(ISCSI_DEF_MAX_RECV_SEG_LEN + 4096 - \
 		(ISER_RX_PAYLOAD_SIZE + sizeof(u64) + sizeof(struct ib_sge) + \
-		sizeof(struct ib_cqe)))
+		sizeof(struct ib_cqe) + sizeof(bool)))
 
 #define ISCSI_ISER_SG_TABLESIZE		256
 
@@ -85,6 +85,7 @@ struct iser_rx_desc {
 	u64		dma_addr;
 	struct ib_sge	rx_sg;
 	struct ib_cqe	rx_cqe;
+	bool		in_use;
 	char		pad[ISER_RX_PAD_SIZE];
 } __packed;
 
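The isert change above is an "owner bit" guard: a receive descriptor is marked busy when a completion hands it back, and the repost path silently skips descriptors that were already requeued, so a duplicate repost can never corrupt the receive queue. A minimal userspace sketch of the same pattern, assuming nothing beyond the hunks themselves (descriptor fields are illustrative, not the driver's):

#include <stdbool.h>
#include <stdio.h>

struct rx_desc {
	int id;
	bool in_use;	/* set on completion, cleared on repost */
};

/* repost only descriptors the completion path handed back */
static int post_recv(struct rx_desc *d)
{
	if (!d->in_use)
		return 0;	/* already reposted: silently succeed */
	d->in_use = false;
	printf("reposted desc %d\n", d->id);
	return 0;
}

static void recv_done(struct rx_desc *d)
{
	d->in_use = true;	/* completion owns it until reposted */
}

int main(void)
{
	struct rx_desc d = { .id = 0, .in_use = false };

	post_recv(&d);	/* no-op: never completed */
	recv_done(&d);
	post_recv(&d);	/* real repost */
	post_recv(&d);	/* duplicate call is harmless */
	return 0;
}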
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index 155fcb3b6230..153b1ee13e03 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -202,6 +202,7 @@ static const struct xpad_device {
 	{ 0x1430, 0x8888, "TX6500+ Dance Pad (first generation)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
 	{ 0x146b, 0x0601, "BigBen Interactive XBOX 360 Controller", 0, XTYPE_XBOX360 },
 	{ 0x1532, 0x0037, "Razer Sabertooth", 0, XTYPE_XBOX360 },
+	{ 0x1532, 0x0a03, "Razer Wildcat", 0, XTYPE_XBOXONE },
 	{ 0x15e4, 0x3f00, "Power A Mini Pro Elite", 0, XTYPE_XBOX360 },
 	{ 0x15e4, 0x3f0a, "Xbox Airflo wired controller", 0, XTYPE_XBOX360 },
 	{ 0x15e4, 0x3f10, "Batarang Xbox 360 controller", 0, XTYPE_XBOX360 },
@@ -326,6 +327,7 @@ static struct usb_device_id xpad_table[] = {
 	XPAD_XBOX360_VENDOR(0x1430),		/* RedOctane X-Box 360 controllers */
 	XPAD_XBOX360_VENDOR(0x146b),		/* BigBen Interactive Controllers */
 	XPAD_XBOX360_VENDOR(0x1532),		/* Razer Sabertooth */
+	XPAD_XBOXONE_VENDOR(0x1532),		/* Razer Wildcat */
 	XPAD_XBOX360_VENDOR(0x15e4),		/* Numark X-Box 360 controllers */
 	XPAD_XBOX360_VENDOR(0x162e),		/* Joytech X-Box 360 controllers */
 	XPAD_XBOX360_VENDOR(0x1689),		/* Razer Onza */
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index 125528f39e92..8162121bb1bc 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -262,6 +262,7 @@ config IRQ_MXS
 
 config MVEBU_ODMI
 	bool
+	select GENERIC_MSI_IRQ_DOMAIN
 
 config MVEBU_PIC
 	bool
diff --git a/drivers/irqchip/irq-imx-gpcv2.c b/drivers/irqchip/irq-imx-gpcv2.c
index 15af9a9753e5..2d203b422129 100644
--- a/drivers/irqchip/irq-imx-gpcv2.c
+++ b/drivers/irqchip/irq-imx-gpcv2.c
@@ -230,6 +230,8 @@ static int __init imx_gpcv2_irqchip_init(struct device_node *node,
 		return -ENOMEM;
 	}
 
+	raw_spin_lock_init(&cd->rlock);
+
 	cd->gpc_base = of_iomap(node, 0);
 	if (!cd->gpc_base) {
 		pr_err("fsl-gpcv2: unable to map gpc registers\n");
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index 11d12bccc4e7..cd20df12d63d 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -991,8 +991,12 @@ static void __init gic_map_single_int(struct device_node *node,
 
 static void __init gic_map_interrupts(struct device_node *node)
 {
+	gic_map_single_int(node, GIC_LOCAL_INT_WD);
+	gic_map_single_int(node, GIC_LOCAL_INT_COMPARE);
 	gic_map_single_int(node, GIC_LOCAL_INT_TIMER);
 	gic_map_single_int(node, GIC_LOCAL_INT_PERFCTR);
+	gic_map_single_int(node, GIC_LOCAL_INT_SWINT0);
+	gic_map_single_int(node, GIC_LOCAL_INT_SWINT1);
 	gic_map_single_int(node, GIC_LOCAL_INT_FDC);
 }
 
diff --git a/drivers/isdn/capi/kcapi.c b/drivers/isdn/capi/kcapi.c
index 1dfd1085a04f..9ca691d6c13b 100644
--- a/drivers/isdn/capi/kcapi.c
+++ b/drivers/isdn/capi/kcapi.c
@@ -1032,6 +1032,7 @@ static int old_capi_manufacturer(unsigned int cmd, void __user *data)
 						sizeof(avmb1_carddef))))
 			return -EFAULT;
 		cdef.cardtype = AVM_CARDTYPE_B1;
+		cdef.cardnr = 0;
 	} else {
 		if ((retval = copy_from_user(&cdef, data,
 					     sizeof(avmb1_extcarddef))))
diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
index e4c2c1a1e993..6735c8d6a445 100644
--- a/drivers/md/dm-cache-metadata.c
+++ b/drivers/md/dm-cache-metadata.c
@@ -932,7 +932,7 @@ static int blocks_are_clean_separate_dirty(struct dm_cache_metadata *cmd,
 	*result = true;
 
 	r = dm_bitset_cursor_begin(&cmd->dirty_info, cmd->dirty_root,
-				   from_cblock(begin), &cmd->dirty_cursor);
+				   from_cblock(cmd->cache_blocks), &cmd->dirty_cursor);
 	if (r) {
 		DMERR("%s: dm_bitset_cursor_begin for dirty failed", __func__);
 		return r;
@@ -959,14 +959,16 @@ static int blocks_are_clean_separate_dirty(struct dm_cache_metadata *cmd,
 			return 0;
 		}
 
+		begin = to_cblock(from_cblock(begin) + 1);
+		if (begin == end)
+			break;
+
 		r = dm_bitset_cursor_next(&cmd->dirty_cursor);
 		if (r) {
 			DMERR("%s: dm_bitset_cursor_next for dirty failed", __func__);
 			dm_bitset_cursor_end(&cmd->dirty_cursor);
 			return r;
 		}
-
-		begin = to_cblock(from_cblock(begin) + 1);
 	}
 
 	dm_bitset_cursor_end(&cmd->dirty_cursor);
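The second hunk reorders the loop so the block index is advanced and the end condition tested before the cursor is stepped, which keeps the bitset cursor from being walked past its final element. The shape of that fix as a self-contained sketch (a plain array stands in for the bitset cursor; names are illustrative):

#include <stdio.h>

int main(void)
{
	int bits[] = { 0, 0, 1, 0 };
	unsigned begin = 0, end = 4;
	unsigned pos = 0;	/* stands in for the cursor position */

	for (;;) {
		if (bits[pos]) {
			printf("dirty block at %u\n", begin);
			break;
		}
		/* advance the index and test for the end *before*
		 * stepping the cursor, so the cursor never moves
		 * past the last element */
		begin++;
		if (begin == end)
			break;
		pos++;	/* the cursor_next() equivalent */
	}
	return 0;
}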
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index f8564d63982f..1e217ba84d09 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -3726,7 +3726,7 @@ static int raid_preresume(struct dm_target *ti)
 		return r;
 
 	/* Resize bitmap to adjust to changed region size (aka MD bitmap chunksize) */
-	if (test_bit(RT_FLAG_RS_BITMAP_LOADED, &rs->runtime_flags) &&
+	if (test_bit(RT_FLAG_RS_BITMAP_LOADED, &rs->runtime_flags) && mddev->bitmap &&
 	    mddev->bitmap_info.chunksize != to_bytes(rs->requested_bitmap_chunk_sectors)) {
 		r = bitmap_resize(mddev->bitmap, mddev->dev_sectors,
 				  to_bytes(rs->requested_bitmap_chunk_sectors), 0);
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index 28955b94d2b2..0b081d170087 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -755,6 +755,7 @@ static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
 		/* Undo dm_start_request() before requeuing */
 		rq_end_stats(md, rq);
 		rq_completed(md, rq_data_dir(rq), false);
+		blk_mq_delay_run_hw_queue(hctx, 100/*ms*/);
 		return BLK_MQ_RQ_QUEUE_BUSY;
 	}
 
diff --git a/drivers/md/dm-verity-fec.c b/drivers/md/dm-verity-fec.c
index 0f0eb8a3d922..78f36012eaca 100644
--- a/drivers/md/dm-verity-fec.c
+++ b/drivers/md/dm-verity-fec.c
@@ -146,8 +146,6 @@ static int fec_decode_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio,
 		block = fec_buffer_rs_block(v, fio, n, i);
 		res = fec_decode_rs8(v, fio, block, &par[offset], neras);
 		if (res < 0) {
-			dm_bufio_release(buf);
-
 			r = res;
 			goto error;
 		}
@@ -172,6 +170,8 @@ static int fec_decode_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio,
 done:
 	r = corrected;
 error:
+	dm_bufio_release(buf);
+
 	if (r < 0 && neras)
 		DMERR_LIMIT("%s: FEC %llu: failed to correct: %d",
 			    v->data_dev->name, (unsigned long long)rsb, r);
@@ -269,7 +269,7 @@ static int fec_read_bufs(struct dm_verity *v, struct dm_verity_io *io,
 					  &is_zero) == 0) {
 			/* skip known zero blocks entirely */
 			if (is_zero)
-				continue;
+				goto done;
 
 			/*
 			 * skip if we have already found the theoretical
@@ -439,6 +439,13 @@ int verity_fec_decode(struct dm_verity *v, struct dm_verity_io *io,
 	if (!verity_fec_is_enabled(v))
 		return -EOPNOTSUPP;
 
+	if (fio->level >= DM_VERITY_FEC_MAX_RECURSION) {
+		DMWARN_LIMIT("%s: FEC: recursion too deep", v->data_dev->name);
+		return -EIO;
+	}
+
+	fio->level++;
+
 	if (type == DM_VERITY_BLOCK_TYPE_METADATA)
 		block += v->data_blocks;
 
@@ -470,7 +477,7 @@ int verity_fec_decode(struct dm_verity *v, struct dm_verity_io *io,
 	if (r < 0) {
 		r = fec_decode_rsb(v, io, fio, rsb, offset, true);
 		if (r < 0)
-			return r;
+			goto done;
 	}
 
 	if (dest)
@@ -480,6 +487,8 @@ int verity_fec_decode(struct dm_verity *v, struct dm_verity_io *io,
 		r = verity_for_bv_block(v, io, iter, fec_bv_copy);
 	}
 
+done:
+	fio->level--;
 	return r;
 }
 
@@ -520,6 +529,7 @@ void verity_fec_init_io(struct dm_verity_io *io)
 	memset(fio->bufs, 0, sizeof(fio->bufs));
 	fio->nbufs = 0;
 	fio->output = NULL;
+	fio->level = 0;
 }
 
 /*
diff --git a/drivers/md/dm-verity-fec.h b/drivers/md/dm-verity-fec.h
index 7fa0298b995e..bb31ce87a933 100644
--- a/drivers/md/dm-verity-fec.h
+++ b/drivers/md/dm-verity-fec.h
@@ -27,6 +27,9 @@
 #define DM_VERITY_FEC_BUF_MAX \
 	(1 << (PAGE_SHIFT - DM_VERITY_FEC_BUF_RS_BITS))
 
+/* maximum recursion level for verity_fec_decode */
+#define DM_VERITY_FEC_MAX_RECURSION	4
+
 #define DM_VERITY_OPT_FEC_DEV	"use_fec_from_device"
 #define DM_VERITY_OPT_FEC_BLOCKS	"fec_blocks"
 #define DM_VERITY_OPT_FEC_START	"fec_start"
@@ -58,6 +61,7 @@ struct dm_verity_fec_io {
 	unsigned nbufs;		/* number of buffers allocated */
 	u8 *output;		/* buffer for corrected output */
 	size_t output_pos;
+	unsigned level;		/* recursion level */
 };
 
 #ifdef CONFIG_DM_VERITY_FEC
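Taken together, these dm-verity hunks bound mutually recursive FEC decoding with a per-io depth counter: bump it on entry, cap it, and drop it on every exit path through a single label. A self-contained sketch of the same guard, assuming only what the hunks show (the limit of 4 mirrors DM_VERITY_FEC_MAX_RECURSION; the recursive work is a stand-in):

#include <stdio.h>

#define MAX_RECURSION 4

struct io {
	unsigned level;	/* recursion depth, starts at 0 */
};

static int decode(struct io *io, int depth_wanted)
{
	int r;

	if (io->level >= MAX_RECURSION)
		return -1;	/* "recursion too deep" */
	io->level++;

	if (depth_wanted > 0)
		r = decode(io, depth_wanted - 1);	/* may recurse */
	else
		r = 0;

	io->level--;	/* single exit point, mirrors the 'done:' label */
	return r;
}

int main(void)
{
	struct io io = { .level = 0 };

	printf("shallow: %d\n", decode(&io, 2));	/* 0: succeeds */
	printf("deep:    %d\n", decode(&io, 10));	/* -1: capped */
	return 0;
}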
diff --git a/drivers/media/v4l2-core/videobuf2-dma-contig.c b/drivers/media/v4l2-core/videobuf2-dma-contig.c
index fb6a177be461..2db0413f5d57 100644
--- a/drivers/media/v4l2-core/videobuf2-dma-contig.c
+++ b/drivers/media/v4l2-core/videobuf2-dma-contig.c
@@ -356,8 +356,8 @@ static struct dma_buf_ops vb2_dc_dmabuf_ops = {
 	.detach = vb2_dc_dmabuf_ops_detach,
 	.map_dma_buf = vb2_dc_dmabuf_ops_map,
 	.unmap_dma_buf = vb2_dc_dmabuf_ops_unmap,
-	.kmap = vb2_dc_dmabuf_ops_kmap,
-	.kmap_atomic = vb2_dc_dmabuf_ops_kmap,
+	.map = vb2_dc_dmabuf_ops_kmap,
+	.map_atomic = vb2_dc_dmabuf_ops_kmap,
 	.vmap = vb2_dc_dmabuf_ops_vmap,
 	.mmap = vb2_dc_dmabuf_ops_mmap,
 	.release = vb2_dc_dmabuf_ops_release,
diff --git a/drivers/media/v4l2-core/videobuf2-dma-sg.c b/drivers/media/v4l2-core/videobuf2-dma-sg.c
index ecff8f492c4f..6fd1343b7c13 100644
--- a/drivers/media/v4l2-core/videobuf2-dma-sg.c
+++ b/drivers/media/v4l2-core/videobuf2-dma-sg.c
@@ -504,8 +504,8 @@ static struct dma_buf_ops vb2_dma_sg_dmabuf_ops = {
 	.detach = vb2_dma_sg_dmabuf_ops_detach,
 	.map_dma_buf = vb2_dma_sg_dmabuf_ops_map,
 	.unmap_dma_buf = vb2_dma_sg_dmabuf_ops_unmap,
-	.kmap = vb2_dma_sg_dmabuf_ops_kmap,
-	.kmap_atomic = vb2_dma_sg_dmabuf_ops_kmap,
+	.map = vb2_dma_sg_dmabuf_ops_kmap,
+	.map_atomic = vb2_dma_sg_dmabuf_ops_kmap,
 	.vmap = vb2_dma_sg_dmabuf_ops_vmap,
 	.mmap = vb2_dma_sg_dmabuf_ops_mmap,
 	.release = vb2_dma_sg_dmabuf_ops_release,
diff --git a/drivers/media/v4l2-core/videobuf2-vmalloc.c b/drivers/media/v4l2-core/videobuf2-vmalloc.c
index 3f778147cdef..27d1db3bb8cf 100644
--- a/drivers/media/v4l2-core/videobuf2-vmalloc.c
+++ b/drivers/media/v4l2-core/videobuf2-vmalloc.c
@@ -342,8 +342,8 @@ static struct dma_buf_ops vb2_vmalloc_dmabuf_ops = {
 	.detach = vb2_vmalloc_dmabuf_ops_detach,
 	.map_dma_buf = vb2_vmalloc_dmabuf_ops_map,
 	.unmap_dma_buf = vb2_vmalloc_dmabuf_ops_unmap,
-	.kmap = vb2_vmalloc_dmabuf_ops_kmap,
-	.kmap_atomic = vb2_vmalloc_dmabuf_ops_kmap,
+	.map = vb2_vmalloc_dmabuf_ops_kmap,
+	.map_atomic = vb2_vmalloc_dmabuf_ops_kmap,
 	.vmap = vb2_vmalloc_dmabuf_ops_vmap,
 	.mmap = vb2_vmalloc_dmabuf_ops_mmap,
 	.release = vb2_vmalloc_dmabuf_ops_release,
diff --git a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c
index 7fd964256faa..d5430ed02a67 100644
--- a/drivers/mmc/host/sdhci-of-at91.c
+++ b/drivers/mmc/host/sdhci-of-at91.c
@@ -29,6 +29,8 @@
 
 #include "sdhci-pltfm.h"
 
+#define SDMMC_MC1R	0x204
+#define		SDMMC_MC1R_DDR		BIT(3)
 #define SDMMC_CACR	0x230
 #define		SDMMC_CACR_CAPWREN	BIT(0)
 #define		SDMMC_CACR_KEY		(0x46 << 8)
@@ -103,11 +105,18 @@ static void sdhci_at91_set_power(struct sdhci_host *host, unsigned char mode,
 	sdhci_set_power_noreg(host, mode, vdd);
 }
 
+void sdhci_at91_set_uhs_signaling(struct sdhci_host *host, unsigned int timing)
+{
+	if (timing == MMC_TIMING_MMC_DDR52)
+		sdhci_writeb(host, SDMMC_MC1R_DDR, SDMMC_MC1R);
+	sdhci_set_uhs_signaling(host, timing);
+}
+
 static const struct sdhci_ops sdhci_at91_sama5d2_ops = {
 	.set_clock		= sdhci_at91_set_clock,
 	.set_bus_width		= sdhci_set_bus_width,
 	.reset			= sdhci_reset,
-	.set_uhs_signaling	= sdhci_set_uhs_signaling,
+	.set_uhs_signaling	= sdhci_at91_set_uhs_signaling,
 	.set_power		= sdhci_at91_set_power,
 };
 
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 9c1a099afbbe..63bc33a54d0d 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -1830,6 +1830,9 @@ static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
 	struct sdhci_host *host = mmc_priv(mmc);
 	unsigned long flags;
 
+	if (enable)
+		pm_runtime_get_noresume(host->mmc->parent);
+
 	spin_lock_irqsave(&host->lock, flags);
 	if (enable)
 		host->flags |= SDHCI_SDIO_IRQ_ENABLED;
@@ -1838,6 +1841,9 @@ static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
 
 	sdhci_enable_sdio_irq_nolock(host, enable);
 	spin_unlock_irqrestore(&host->lock, flags);
+
+	if (!enable)
+		pm_runtime_put_noidle(host->mmc->parent);
 }
 
 static int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
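The sdhci hunk pins the parent device while the SDIO IRQ is enabled by pairing a reference grab on enable with a release on disable, so runtime PM cannot suspend the controller out from under an armed interrupt. The invariant is plain reference counting; a minimal self-contained sketch of it (the counter stands in for the pm_runtime usage count):

#include <stdbool.h>
#include <stdio.h>

static int usage_count;	/* stands in for the pm_runtime usage count */

static void sdio_irq_set(bool enable)
{
	if (enable)
		usage_count++;	/* pm_runtime_get_noresume() analogue */

	/* ... program the controller's IRQ enable bit here ... */

	if (!enable)
		usage_count--;	/* pm_runtime_put_noidle() analogue */
}

int main(void)
{
	sdio_irq_set(true);
	printf("enabled, count=%d\n", usage_count);	/* 1: no suspend */
	sdio_irq_set(false);
	printf("disabled, count=%d\n", usage_count);	/* 0: may suspend */
	return 0;
}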
diff --git a/drivers/net/can/ifi_canfd/ifi_canfd.c b/drivers/net/can/ifi_canfd/ifi_canfd.c
index 138f5ae75c0b..4d1fe8d95042 100644
--- a/drivers/net/can/ifi_canfd/ifi_canfd.c
+++ b/drivers/net/can/ifi_canfd/ifi_canfd.c
@@ -557,7 +557,7 @@ static int ifi_canfd_poll(struct napi_struct *napi, int quota)
 	int work_done = 0;
 
 	u32 stcmd = readl(priv->base + IFI_CANFD_STCMD);
-	u32 rxstcmd = readl(priv->base + IFI_CANFD_STCMD);
+	u32 rxstcmd = readl(priv->base + IFI_CANFD_RXSTCMD);
 	u32 errctr = readl(priv->base + IFI_CANFD_ERROR_CTR);
 
 	/* Handle bus state changes */
diff --git a/drivers/net/can/rcar/rcar_can.c b/drivers/net/can/rcar/rcar_can.c
index caed4e6960f8..11662f479e76 100644
--- a/drivers/net/can/rcar/rcar_can.c
+++ b/drivers/net/can/rcar/rcar_can.c
@@ -826,8 +826,7 @@ static int rcar_can_probe(struct platform_device *pdev)
 
 	devm_can_led_init(ndev);
 
-	dev_info(&pdev->dev, "device registered (regs @ %p, IRQ%d)\n",
-		 priv->regs, ndev->irq);
+	dev_info(&pdev->dev, "device registered (IRQ%d)\n", ndev->irq);
 
 	return 0;
 fail_candev:
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
index d05fbfdce5e5..5d6c40d86775 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
@@ -100,11 +100,6 @@ static int aq_ndev_change_mtu(struct net_device *ndev, int new_mtu)
 		goto err_exit;
 	ndev->mtu = new_mtu;
 
-	if (netif_running(ndev)) {
-		aq_ndev_close(ndev);
-		aq_ndev_open(ndev);
-	}
-
 err_exit:
 	return err;
 }
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index ee78444bfb88..cdb02991f249 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -487,6 +487,9 @@ static unsigned int aq_nic_map_skb(struct aq_nic_s *self,
 		dx_buff->mss = skb_shinfo(skb)->gso_size;
 		dx_buff->is_txc = 1U;
 
+		dx_buff->is_ipv6 =
+			(ip_hdr(skb)->version == 6) ? 1U : 0U;
+
 		dx = aq_ring_next_dx(ring, dx);
 		dx_buff = &ring->buff_ring[dx];
 		++ret;
@@ -510,10 +513,22 @@ static unsigned int aq_nic_map_skb(struct aq_nic_s *self,
 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
 		dx_buff->is_ip_cso = (htons(ETH_P_IP) == skb->protocol) ?
 			1U : 0U;
-		dx_buff->is_tcp_cso =
-			(ip_hdr(skb)->protocol == IPPROTO_TCP) ? 1U : 0U;
-		dx_buff->is_udp_cso =
-			(ip_hdr(skb)->protocol == IPPROTO_UDP) ? 1U : 0U;
+
+		if (ip_hdr(skb)->version == 4) {
+			dx_buff->is_tcp_cso =
+				(ip_hdr(skb)->protocol == IPPROTO_TCP) ?
+					1U : 0U;
+			dx_buff->is_udp_cso =
+				(ip_hdr(skb)->protocol == IPPROTO_UDP) ?
+					1U : 0U;
+		} else if (ip_hdr(skb)->version == 6) {
+			dx_buff->is_tcp_cso =
+				(ipv6_hdr(skb)->nexthdr == NEXTHDR_TCP) ?
+					1U : 0U;
+			dx_buff->is_udp_cso =
+				(ipv6_hdr(skb)->nexthdr == NEXTHDR_UDP) ?
+					1U : 0U;
+		}
 	}
 
 	for (; nr_frags--; ++frag_count) {
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
index 0358e6072d45..3a8a4aa13687 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
@@ -101,6 +101,7 @@ int aq_ring_init(struct aq_ring_s *self)
 	self->hw_head = 0;
 	self->sw_head = 0;
 	self->sw_tail = 0;
+	spin_lock_init(&self->header.lock);
 	return 0;
 }
 
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
index 257254645068..eecd6d1c4d73 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
@@ -58,7 +58,8 @@ struct __packed aq_ring_buff_s {
 			u8 len_l2;
 			u8 len_l3;
 			u8 len_l4;
-			u8 rsvd2;
+			u8 is_ipv6:1;
+			u8 rsvd2:7;
 			u32 len_pkt;
 		};
 	};
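Splitting the spare byte into is_ipv6:1 plus rsvd2:7 adds the new flag without growing the packed descriptor, which matters because the layout is shared with hardware. A quick self-contained check of that trick (struct names here are illustrative, not the driver's):

#include <stdio.h>
#include <stdint.h>

struct desc_old {
	uint8_t len_l4;
	uint8_t rsvd2;		/* one spare byte */
	uint32_t len_pkt;
} __attribute__((packed));

struct desc_new {
	uint8_t len_l4;
	uint8_t is_ipv6:1;	/* new flag... */
	uint8_t rsvd2:7;	/* ...carved out of the same byte */
	uint32_t len_pkt;
} __attribute__((packed));

int main(void)
{
	/* both layouts occupy exactly 6 bytes */
	printf("old: %zu, new: %zu\n",
	       sizeof(struct desc_old), sizeof(struct desc_new));
	return 0;
}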
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
index a2b746a2dd50..4ee15ff06a44 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
@@ -433,6 +433,9 @@ static int hw_atl_a0_hw_ring_tx_xmit(struct aq_hw_s *self,
 						 buff->len_l3 +
 						 buff->len_l2);
 			is_gso = true;
+
+			if (buff->is_ipv6)
+				txd->ctl |= HW_ATL_A0_TXD_CTL_CMD_IPV6;
 		} else {
 			buff_pa_len = buff->len;
 
@@ -458,6 +461,7 @@ static int hw_atl_a0_hw_ring_tx_xmit(struct aq_hw_s *self,
 			if (unlikely(buff->is_eop)) {
 				txd->ctl |= HW_ATL_A0_TXD_CTL_EOP;
 				txd->ctl |= HW_ATL_A0_TXD_CTL_CMD_WB;
+				is_gso = false;
 			}
 		}
 
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
index cab2931dab9a..42150708191d 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
@@ -471,6 +471,9 @@ static int hw_atl_b0_hw_ring_tx_xmit(struct aq_hw_s *self,
 						 buff->len_l3 +
 						 buff->len_l2);
 			is_gso = true;
+
+			if (buff->is_ipv6)
+				txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_IPV6;
 		} else {
 			buff_pa_len = buff->len;
 
@@ -496,6 +499,7 @@ static int hw_atl_b0_hw_ring_tx_xmit(struct aq_hw_s *self,
 			if (unlikely(buff->is_eop)) {
 				txd->ctl |= HW_ATL_B0_TXD_CTL_EOP;
 				txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_WB;
+				is_gso = false;
 			}
 		}
 
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 0a23034bbe3f..352beff796ae 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -2277,7 +2277,7 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id,
 				 GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCP) | \
 				 GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RSVD_GRC))
 
-#define HW_INTERRUT_ASSERT_SET_0 \
+#define HW_INTERRUPT_ASSERT_SET_0 \
 				(AEU_INPUTS_ATTN_BITS_TSDM_HW_INTERRUPT | \
 				 AEU_INPUTS_ATTN_BITS_TCM_HW_INTERRUPT | \
 				 AEU_INPUTS_ATTN_BITS_TSEMI_HW_INTERRUPT | \
@@ -2290,7 +2290,7 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id,
 				 AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR |\
 				 AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR |\
 				 AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR)
-#define HW_INTERRUT_ASSERT_SET_1 \
+#define HW_INTERRUPT_ASSERT_SET_1 \
 				(AEU_INPUTS_ATTN_BITS_QM_HW_INTERRUPT | \
 				 AEU_INPUTS_ATTN_BITS_TIMERS_HW_INTERRUPT | \
 				 AEU_INPUTS_ATTN_BITS_XSDM_HW_INTERRUPT | \
@@ -2318,7 +2318,7 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id,
 				 AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR | \
 				 AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR |\
 				 AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR)
-#define HW_INTERRUT_ASSERT_SET_2 \
+#define HW_INTERRUPT_ASSERT_SET_2 \
 				(AEU_INPUTS_ATTN_BITS_CSEMI_HW_INTERRUPT | \
 				 AEU_INPUTS_ATTN_BITS_CDU_HW_INTERRUPT | \
 				 AEU_INPUTS_ATTN_BITS_DMAE_HW_INTERRUPT | \
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index ac76fc251d26..a851f95c307a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -4166,14 +4166,14 @@ static void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
 		bnx2x_release_phy_lock(bp);
 	}
 
-	if (attn & HW_INTERRUT_ASSERT_SET_0) {
+	if (attn & HW_INTERRUPT_ASSERT_SET_0) {
 
 		val = REG_RD(bp, reg_offset);
-		val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
+		val &= ~(attn & HW_INTERRUPT_ASSERT_SET_0);
 		REG_WR(bp, reg_offset, val);
 
 		BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
-			  (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
+			  (u32)(attn & HW_INTERRUPT_ASSERT_SET_0));
 		bnx2x_panic();
 	}
 }
@@ -4191,7 +4191,7 @@ static void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
 		BNX2X_ERR("FATAL error from DORQ\n");
 	}
 
-	if (attn & HW_INTERRUT_ASSERT_SET_1) {
+	if (attn & HW_INTERRUPT_ASSERT_SET_1) {
 
 		int port = BP_PORT(bp);
 		int reg_offset;
@@ -4200,11 +4200,11 @@ static void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
 				       MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
 
 		val = REG_RD(bp, reg_offset);
-		val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
+		val &= ~(attn & HW_INTERRUPT_ASSERT_SET_1);
 		REG_WR(bp, reg_offset, val);
 
 		BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
-			  (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
+			  (u32)(attn & HW_INTERRUPT_ASSERT_SET_1));
 		bnx2x_panic();
 	}
 }
@@ -4235,7 +4235,7 @@ static void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
 		}
 	}
 
-	if (attn & HW_INTERRUT_ASSERT_SET_2) {
+	if (attn & HW_INTERRUPT_ASSERT_SET_2) {
 
 		int port = BP_PORT(bp);
 		int reg_offset;
@@ -4244,11 +4244,11 @@ static void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
 				       MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
 
 		val = REG_RD(bp, reg_offset);
-		val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
+		val &= ~(attn & HW_INTERRUPT_ASSERT_SET_2);
 		REG_WR(bp, reg_offset, val);
 
 		BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
-			  (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
+			  (u32)(attn & HW_INTERRUPT_ASSERT_SET_2));
 		bnx2x_panic();
 	}
 }
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 32de4589d16a..1f1e54ba0ecb 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -1983,20 +1983,25 @@ static void bnxt_free_rx_skbs(struct bnxt *bp)
 
 		for (j = 0; j < max_idx; j++) {
 			struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[j];
+			dma_addr_t mapping = rx_buf->mapping;
 			void *data = rx_buf->data;
 
 			if (!data)
 				continue;
 
-			dma_unmap_single(&pdev->dev, rx_buf->mapping,
-					 bp->rx_buf_use_size, bp->rx_dir);
-
 			rx_buf->data = NULL;
 
-			if (BNXT_RX_PAGE_MODE(bp))
+			if (BNXT_RX_PAGE_MODE(bp)) {
+				mapping -= bp->rx_dma_offset;
+				dma_unmap_page(&pdev->dev, mapping,
+					       PAGE_SIZE, bp->rx_dir);
 				__free_page(data);
-			else
+			} else {
+				dma_unmap_single(&pdev->dev, mapping,
+						 bp->rx_buf_use_size,
+						 bp->rx_dir);
 				kfree(data);
+			}
 		}
 
 		for (j = 0; j < max_agg_idx; j++) {
@@ -2455,6 +2460,18 @@ static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
 	return 0;
 }
 
+static void bnxt_init_cp_rings(struct bnxt *bp)
+{
+	int i;
+
+	for (i = 0; i < bp->cp_nr_rings; i++) {
+		struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
+		struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
+
+		ring->fw_ring_id = INVALID_HW_RING_ID;
+	}
+}
+
 static int bnxt_init_rx_rings(struct bnxt *bp)
 {
 	int i, rc = 0;
@@ -4732,7 +4749,7 @@ static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
 		rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags);
 		if (rc) {
 			netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n",
-				   rc, i);
+				   i, rc);
 			return rc;
 		}
 	}
@@ -5006,6 +5023,7 @@ static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init)
 
 static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init)
 {
+	bnxt_init_cp_rings(bp);
 	bnxt_init_rx_rings(bp);
 	bnxt_init_tx_rings(bp);
 	bnxt_init_ring_grps(bp, irq_re_init);
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
index 9e59663a6ead..0f6811860ad5 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
@@ -1930,13 +1930,13 @@ static void
 bfa_ioc_send_enable(struct bfa_ioc *ioc)
 {
 	struct bfi_ioc_ctrl_req enable_req;
-	struct timeval tv;
 
 	bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
 		    bfa_ioc_portid(ioc));
 	enable_req.clscode = htons(ioc->clscode);
-	do_gettimeofday(&tv);
-	enable_req.tv_sec = ntohl(tv.tv_sec);
+	enable_req.rsvd = htons(0);
+	/* overflow in 2106 */
+	enable_req.tv_sec = ntohl(ktime_get_real_seconds());
 	bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req));
 }
 
@@ -1947,6 +1947,10 @@ bfa_ioc_send_disable(struct bfa_ioc *ioc)
 
 	bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
 		    bfa_ioc_portid(ioc));
+	disable_req.clscode = htons(ioc->clscode);
+	disable_req.rsvd = htons(0);
+	/* overflow in 2106 */
+	disable_req.tv_sec = ntohl(ktime_get_real_seconds());
 	bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req));
 }
 
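The do_gettimeofday() to ktime_get_real_seconds() switch here is part of the y2038 cleanup; the remaining "overflow in 2106" note is because the wire field is still an unsigned 32-bit seconds count, which wraps 2^32 seconds after 1970. A self-contained check of that date arithmetic (assumes a 64-bit time_t, as on modern Linux):

#include <stdio.h>
#include <stdint.h>
#include <time.h>

int main(void)
{
	/* a u32 seconds-since-1970 field wraps at 2^32 */
	time_t wrap = (time_t)UINT32_MAX + 1;
	struct tm tm;

	gmtime_r(&wrap, &tm);
	printf("u32 epoch seconds wrap on %04d-%02d-%02d\n",
	       tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday);	/* 2106-02-07 */
	return 0;
}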
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 30e855004c57..02dd5246dfae 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -4939,8 +4939,9 @@ static int
 __be_cmd_set_logical_link_config(struct be_adapter *adapter,
 				 int link_state, int version, u8 domain)
 {
-	struct be_mcc_wrb *wrb;
 	struct be_cmd_req_set_ll_link *req;
+	struct be_mcc_wrb *wrb;
+	u32 link_config = 0;
 	int status;
 
 	mutex_lock(&adapter->mcc_lock);
@@ -4962,10 +4963,12 @@ __be_cmd_set_logical_link_config(struct be_adapter *adapter,
 
 	if (link_state == IFLA_VF_LINK_STATE_ENABLE ||
 	    link_state == IFLA_VF_LINK_STATE_AUTO)
-		req->link_config |= PLINK_ENABLE;
+		link_config |= PLINK_ENABLE;
 
 	if (link_state == IFLA_VF_LINK_STATE_AUTO)
-		req->link_config |= PLINK_TRACK;
+		link_config |= PLINK_TRACK;
+
+	req->link_config = cpu_to_le32(link_config);
 
 	status = be_mcc_notify_wait(adapter);
 err:
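The benet fix stops OR-ing flags into a little-endian wire field from CPU byte order: the bits are now accumulated in a native u32 and converted exactly once with cpu_to_le32() on store. A self-contained userspace equivalent using <endian.h> (the flag values below are made up for the demo):

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

#define PLINK_ENABLE	(1u << 0)	/* illustrative values */
#define PLINK_TRACK	(1u << 1)

struct req {
	uint32_t link_config;	/* little-endian on the wire */
};

int main(void)
{
	struct req req;
	uint32_t link_config = 0;	/* build flags in CPU order... */

	link_config |= PLINK_ENABLE;
	link_config |= PLINK_TRACK;

	req.link_config = htole32(link_config);	/* ...convert exactly once */

	printf("wire value: 0x%08x\n", le32toh(req.link_config));
	return 0;
}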
diff --git a/drivers/net/ethernet/ezchip/nps_enet.c b/drivers/net/ethernet/ezchip/nps_enet.c
index 992ebe973d25..f819843e2bae 100644
--- a/drivers/net/ethernet/ezchip/nps_enet.c
+++ b/drivers/net/ethernet/ezchip/nps_enet.c
@@ -189,11 +189,9 @@ static int nps_enet_poll(struct napi_struct *napi, int budget)
 
 	nps_enet_tx_handler(ndev);
 	work_done = nps_enet_rx_handler(ndev);
-	if (work_done < budget) {
+	if ((work_done < budget) && napi_complete_done(napi, work_done)) {
 		u32 buf_int_enable_value = 0;
 
-		napi_complete_done(napi, work_done);
-
 		/* set tx_done and rx_rdy bits */
 		buf_int_enable_value |= NPS_ENET_ENABLE << RX_RDY_SHIFT;
 		buf_int_enable_value |= NPS_ENET_ENABLE << TX_DONE_SHIFT;
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index 928b0df2b8e0..ade6b3e4ed13 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -28,8 +28,10 @@
 #include <linux/io.h>
 #include <linux/module.h>
 #include <linux/netdevice.h>
+#include <linux/of.h>
 #include <linux/phy.h>
 #include <linux/platform_device.h>
+#include <linux/property.h>
 #include <net/ip.h>
 #include <net/ncsi.h>
 
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
index 3239d27143b9..bdd8cdd732fb 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
@@ -82,9 +82,12 @@ void hns_mac_get_link_status(struct hns_mac_cb *mac_cb, u32 *link_status)
 	else
 		*link_status = 0;
 
-	ret = mac_cb->dsaf_dev->misc_op->get_sfp_prsnt(mac_cb, &sfp_prsnt);
-	if (!ret)
-		*link_status = *link_status && sfp_prsnt;
+	if (mac_cb->media_type == HNAE_MEDIA_TYPE_FIBER) {
+		ret = mac_cb->dsaf_dev->misc_op->get_sfp_prsnt(mac_cb,
+							       &sfp_prsnt);
+		if (!ret)
+			*link_status = *link_status && sfp_prsnt;
+	}
 
 	mac_cb->link = *link_status;
 }
@@ -855,7 +858,7 @@ static int hns_mac_get_info(struct hns_mac_cb *mac_cb)
 		of_node_put(np);
 
 		np = of_parse_phandle(to_of_node(mac_cb->fw_port),
-				      "serdes-syscon", 0);
+					"serdes-syscon", 0);
 		syscon = syscon_node_to_regmap(np);
 		of_node_put(np);
 		if (IS_ERR_OR_NULL(syscon)) {
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
index 90dbda792614..403ea9db6dbd 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
@@ -1519,6 +1519,7 @@ static void hns_dsaf_set_mac_key(
 	mac_key->high.bits.mac_3 = addr[3];
 	mac_key->low.bits.mac_4 = addr[4];
 	mac_key->low.bits.mac_5 = addr[5];
+	mac_key->low.bits.port_vlan = 0;
 	dsaf_set_field(mac_key->low.bits.port_vlan, DSAF_TBL_TCAM_KEY_VLAN_M,
 		       DSAF_TBL_TCAM_KEY_VLAN_S, vlan_id);
 	dsaf_set_field(mac_key->low.bits.port_vlan, DSAF_TBL_TCAM_KEY_PORT_M,
@@ -2924,10 +2925,11 @@ void hns_dsaf_set_promisc_tcam(struct dsaf_device *dsaf_dev,
 	/* find the tcam entry index for promisc */
 	entry_index = dsaf_promisc_tcam_entry(port);
 
+	memset(&tbl_tcam_data, 0, sizeof(tbl_tcam_data));
+	memset(&tbl_tcam_mask, 0, sizeof(tbl_tcam_mask));
+
 	/* config key mask */
 	if (enable) {
-		memset(&tbl_tcam_data, 0, sizeof(tbl_tcam_data));
-		memset(&tbl_tcam_mask, 0, sizeof(tbl_tcam_mask));
 		dsaf_set_field(tbl_tcam_data.low.bits.port_vlan,
 			       DSAF_TBL_TCAM_KEY_PORT_M,
 			       DSAF_TBL_TCAM_KEY_PORT_S, port);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
index a2c22d084ce9..e13aa064a8e9 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
@@ -461,6 +461,32 @@ int hns_mac_get_sfp_prsnt(struct hns_mac_cb *mac_cb, int *sfp_prsnt)
 	return 0;
 }
 
+int hns_mac_get_sfp_prsnt_acpi(struct hns_mac_cb *mac_cb, int *sfp_prsnt)
+{
+	union acpi_object *obj;
+	union acpi_object obj_args, argv4;
+
+	obj_args.integer.type = ACPI_TYPE_INTEGER;
+	obj_args.integer.value = mac_cb->mac_id;
+
+	argv4.type = ACPI_TYPE_PACKAGE,
+	argv4.package.count = 1,
+	argv4.package.elements = &obj_args,
+
+	obj = acpi_evaluate_dsm(ACPI_HANDLE(mac_cb->dev),
+				hns_dsaf_acpi_dsm_uuid, 0,
+				HNS_OP_GET_SFP_STAT_FUNC, &argv4);
+
+	if (!obj || obj->type != ACPI_TYPE_INTEGER)
+		return -ENODEV;
+
+	*sfp_prsnt = obj->integer.value;
+
+	ACPI_FREE(obj);
+
+	return 0;
+}
+
 /**
  * hns_mac_config_sds_loopback - set loop back for serdes
  * @mac_cb: mac control block
@@ -592,7 +618,7 @@ struct dsaf_misc_op *hns_misc_op_get(struct dsaf_device *dsaf_dev)
 		misc_op->hns_dsaf_roce_srst = hns_dsaf_roce_srst_acpi;
 
 		misc_op->get_phy_if = hns_mac_get_phy_if_acpi;
-		misc_op->get_sfp_prsnt = hns_mac_get_sfp_prsnt;
+		misc_op->get_sfp_prsnt = hns_mac_get_sfp_prsnt_acpi;
 
 		misc_op->cfg_serdes_loopback = hns_mac_config_sds_loopback_acpi;
 	} else {
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 2175cced402f..e9af89ad039c 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -6274,8 +6274,8 @@ static int e1000e_pm_freeze(struct device *dev)
 		/* Quiesce the device without resetting the hardware */
 		e1000e_down(adapter, false);
 		e1000_free_irq(adapter);
-		e1000e_reset_interrupt_capability(adapter);
 	}
+	e1000e_reset_interrupt_capability(adapter);
 
 	/* Allow time for pending master requests to run */
 	e1000e_disable_pcie_master(&adapter->hw);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index e8a8351c8ea9..82a95cc2c8ee 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -4438,8 +4438,12 @@ static void i40e_napi_enable_all(struct i40e_vsi *vsi)
 	if (!vsi->netdev)
 		return;
 
-	for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
-		napi_enable(&vsi->q_vectors[q_idx]->napi);
+	for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
+		struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx];
+
+		if (q_vector->rx.ring || q_vector->tx.ring)
+			napi_enable(&q_vector->napi);
+	}
 }
 
 /**
@@ -4453,8 +4457,12 @@ static void i40e_napi_disable_all(struct i40e_vsi *vsi)
 	if (!vsi->netdev)
 		return;
 
-	for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
-		napi_disable(&vsi->q_vectors[q_idx]->napi);
+	for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
+		struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx];
+
+		if (q_vector->rx.ring || q_vector->tx.ring)
+			napi_disable(&q_vector->napi);
+	}
 }
 
 /**
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
index 55957246c0e8..b5d5519542e8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
@@ -294,7 +294,7 @@ static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev,
 					 struct netdev_notifier_changeupper_info *info)
 {
 	struct net_device *upper = info->upper_dev, *ndev_tmp;
-	struct netdev_lag_upper_info *lag_upper_info;
+	struct netdev_lag_upper_info *lag_upper_info = NULL;
 	bool is_bonded;
 	int bond_status = 0;
 	int num_slaves = 0;
@@ -303,7 +303,8 @@ static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev,
 	if (!netif_is_lag_master(upper))
 		return 0;
 
-	lag_upper_info = info->upper_info;
+	if (info->linking)
+		lag_upper_info = info->upper_info;
 
 	/* The event may still be of interest if the slave does not belong to
 	 * us, but is enslaved to a master which has one or more of our netdevs
diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c
index 06c9f4100cb9..6ad44be08b33 100644
--- a/drivers/net/ethernet/moxa/moxart_ether.c
+++ b/drivers/net/ethernet/moxa/moxart_ether.c
@@ -25,6 +25,7 @@
 #include <linux/of_irq.h>
 #include <linux/crc32.h>
 #include <linux/crc32c.h>
+#include <linux/circ_buf.h>
 
 #include "moxart_ether.h"
 
@@ -278,6 +279,13 @@ rx_next:
 	return rx;
 }
 
+static int moxart_tx_queue_space(struct net_device *ndev)
+{
+	struct moxart_mac_priv_t *priv = netdev_priv(ndev);
+
+	return CIRC_SPACE(priv->tx_head, priv->tx_tail, TX_DESC_NUM);
+}
+
 static void moxart_tx_finished(struct net_device *ndev)
 {
 	struct moxart_mac_priv_t *priv = netdev_priv(ndev);
@@ -297,6 +305,9 @@ static void moxart_tx_finished(struct net_device *ndev)
 		tx_tail = TX_NEXT(tx_tail);
 	}
 	priv->tx_tail = tx_tail;
+	if (netif_queue_stopped(ndev) &&
+	    moxart_tx_queue_space(ndev) >= TX_WAKE_THRESHOLD)
+		netif_wake_queue(ndev);
 }
 
 static irqreturn_t moxart_mac_interrupt(int irq, void *dev_id)
@@ -324,13 +335,18 @@ static int moxart_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	struct moxart_mac_priv_t *priv = netdev_priv(ndev);
 	void *desc;
 	unsigned int len;
-	unsigned int tx_head = priv->tx_head;
+	unsigned int tx_head;
 	u32 txdes1;
 	int ret = NETDEV_TX_BUSY;
 
+	spin_lock_irq(&priv->txlock);
+
+	tx_head = priv->tx_head;
 	desc = priv->tx_desc_base + (TX_REG_DESC_SIZE * tx_head);
 
-	spin_lock_irq(&priv->txlock);
+	if (moxart_tx_queue_space(ndev) == 1)
+		netif_stop_queue(ndev);
+
 	if (moxart_desc_read(desc + TX_REG_OFFSET_DESC0) & TX_DESC0_DMA_OWN) {
 		net_dbg_ratelimited("no TX space for packet\n");
 		priv->stats.tx_dropped++;
diff --git a/drivers/net/ethernet/moxa/moxart_ether.h b/drivers/net/ethernet/moxa/moxart_ether.h
index 93a9563ac7c6..afc32ec998c0 100644
--- a/drivers/net/ethernet/moxa/moxart_ether.h
+++ b/drivers/net/ethernet/moxa/moxart_ether.h
@@ -59,6 +59,7 @@
 #define TX_NEXT(N)		(((N) + 1) & (TX_DESC_NUM_MASK))
 #define TX_BUF_SIZE		1600
 #define TX_BUF_SIZE_MAX		(TX_DESC1_BUF_SIZE_MASK+1)
+#define TX_WAKE_THRESHOLD	16
 
 #define RX_DESC_NUM		64
 #define RX_DESC_NUM_MASK	(RX_DESC_NUM-1)
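moxart_tx_queue_space() is built on the kernel's circular-buffer helpers, which require the ring size to be a power of two so occupancy and free space reduce to a mask instead of a modulo. A self-contained sketch using the same macro definitions as <linux/circ_buf.h> (the sizes below are illustrative):

#include <stdio.h>

/* same definitions as include/linux/circ_buf.h; size must be a power of 2 */
#define CIRC_CNT(head, tail, size)   (((head) - (tail)) & ((size) - 1))
#define CIRC_SPACE(head, tail, size) CIRC_CNT((tail), ((head) + 1), (size))

#define TX_DESC_NUM		32
#define TX_WAKE_THRESHOLD	16

int main(void)
{
	unsigned head = 20, tail = 10;	/* 10 descriptors queued */

	printf("queued: %u, free: %u\n",
	       CIRC_CNT(head, tail, TX_DESC_NUM),
	       CIRC_SPACE(head, tail, TX_DESC_NUM));

	/* the driver wakes the queue once at least this much is free */
	if (CIRC_SPACE(head, tail, TX_DESC_NUM) >= TX_WAKE_THRESHOLD)
		printf("wake the TX queue\n");
	return 0;
}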
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 9179a99563af..a41377e26c07 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -3275,9 +3275,10 @@ void nfp_net_netdev_clean(struct net_device *netdev)
 {
 	struct nfp_net *nn = netdev_priv(netdev);
 
+	unregister_netdev(nn->netdev);
+
 	if (nn->xdp_prog)
 		bpf_prog_put(nn->xdp_prog);
 	if (nn->bpf_offload_xdp)
 		nfp_net_xdp_offload(nn, NULL);
-	unregister_netdev(nn->netdev);
 }
diff --git a/drivers/net/ethernet/rocker/rocker_ofdpa.c b/drivers/net/ethernet/rocker/rocker_ofdpa.c
index 7cd76b6b5cb9..2ae852454780 100644
--- a/drivers/net/ethernet/rocker/rocker_ofdpa.c
+++ b/drivers/net/ethernet/rocker/rocker_ofdpa.c
@@ -2216,18 +2216,15 @@ static int ofdpa_port_stp_update(struct ofdpa_port *ofdpa_port,
 {
 	bool want[OFDPA_CTRL_MAX] = { 0, };
 	bool prev_ctrls[OFDPA_CTRL_MAX];
-	u8 uninitialized_var(prev_state);
+	u8 prev_state;
 	int err;
 	int i;
 
-	if (switchdev_trans_ph_prepare(trans)) {
-		memcpy(prev_ctrls, ofdpa_port->ctrls, sizeof(prev_ctrls));
-		prev_state = ofdpa_port->stp_state;
-	}
-
-	if (ofdpa_port->stp_state == state)
+	prev_state = ofdpa_port->stp_state;
+	if (prev_state == state)
 		return 0;
 
+	memcpy(prev_ctrls, ofdpa_port->ctrls, sizeof(prev_ctrls));
 	ofdpa_port->stp_state = state;
 
 	switch (state) {
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 9f3d9c67e3fe..fa674a8bda0c 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -1267,6 +1267,7 @@ static void soft_reset_slave(struct cpsw_slave *slave)
1267static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv) 1267static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
1268{ 1268{
1269 u32 slave_port; 1269 u32 slave_port;
1270 struct phy_device *phy;
1270 struct cpsw_common *cpsw = priv->cpsw; 1271 struct cpsw_common *cpsw = priv->cpsw;
1271 1272
1272 soft_reset_slave(slave); 1273 soft_reset_slave(slave);
@@ -1300,27 +1301,28 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
1300 1 << slave_port, 0, 0, ALE_MCAST_FWD_2); 1301 1 << slave_port, 0, 0, ALE_MCAST_FWD_2);
1301 1302
1302 if (slave->data->phy_node) { 1303 if (slave->data->phy_node) {
1303 slave->phy = of_phy_connect(priv->ndev, slave->data->phy_node, 1304 phy = of_phy_connect(priv->ndev, slave->data->phy_node,
1304 &cpsw_adjust_link, 0, slave->data->phy_if); 1305 &cpsw_adjust_link, 0, slave->data->phy_if);
1305 if (!slave->phy) { 1306 if (!phy) {
1306 dev_err(priv->dev, "phy \"%s\" not found on slave %d\n", 1307 dev_err(priv->dev, "phy \"%s\" not found on slave %d\n",
1307 slave->data->phy_node->full_name, 1308 slave->data->phy_node->full_name,
1308 slave->slave_num); 1309 slave->slave_num);
1309 return; 1310 return;
1310 } 1311 }
1311 } else { 1312 } else {
1312 slave->phy = phy_connect(priv->ndev, slave->data->phy_id, 1313 phy = phy_connect(priv->ndev, slave->data->phy_id,
1313 &cpsw_adjust_link, slave->data->phy_if); 1314 &cpsw_adjust_link, slave->data->phy_if);
1314 if (IS_ERR(slave->phy)) { 1315 if (IS_ERR(phy)) {
1315 dev_err(priv->dev, 1316 dev_err(priv->dev,
1316 "phy \"%s\" not found on slave %d, err %ld\n", 1317 "phy \"%s\" not found on slave %d, err %ld\n",
1317 slave->data->phy_id, slave->slave_num, 1318 slave->data->phy_id, slave->slave_num,
1318 PTR_ERR(slave->phy)); 1319 PTR_ERR(phy));
1319 slave->phy = NULL;
1320 return; 1320 return;
1321 } 1321 }
1322 } 1322 }
1323 1323
1324 slave->phy = phy;
1325
1324 phy_attached_info(slave->phy); 1326 phy_attached_info(slave->phy);
1325 1327
1326 phy_start(slave->phy); 1328 phy_start(slave->phy);
@@ -1817,6 +1819,8 @@ static void cpsw_ndo_tx_timeout(struct net_device *ndev)
1817 } 1819 }
1818 1820
1819 cpsw_intr_enable(cpsw); 1821 cpsw_intr_enable(cpsw);
1822 netif_trans_update(ndev);
1823 netif_tx_wake_all_queues(ndev);
1820} 1824}
1821 1825
1822static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p) 1826static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p)
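
The cpsw change works into a local phy pointer and assigns slave->phy only once the pointer is known to be valid, so no reader can ever observe an ERR_PTR value in the long-lived field. A small standalone sketch of the pattern, with userspace stand-ins for the kernel's ERR_PTR/IS_ERR helpers (connect_phy() and the structs are invented for the demo):

#include <stdio.h>
#include <errno.h>
#include <stdint.h>

/* Illustrative stand-ins for the kernel's ERR_PTR machinery. */
#define ERR_PTR(err)  ((void *)(intptr_t)(err))
#define IS_ERR(p)     ((uintptr_t)(void *)(p) >= (uintptr_t)-4095)
#define PTR_ERR(p)    ((intptr_t)(p))

struct phy { int id; };
struct slave { struct phy *phy; };

static struct phy *connect_phy(int id)
{
    static struct phy good = { .id = 42 };
    return id < 0 ? ERR_PTR(-ENODEV) : &good;
}

/* As in the cpsw hunk: work in a local and publish to slave->phy
 * only after validation, so readers never see an ERR_PTR value. */
static void slave_open(struct slave *slave, int id)
{
    struct phy *phy = connect_phy(id);

    if (IS_ERR(phy)) {
        fprintf(stderr, "phy connect failed: %ld\n", (long)PTR_ERR(phy));
        return;            /* slave->phy stays NULL, never ERR_PTR */
    }
    slave->phy = phy;      /* publish only a valid pointer */
}

int main(void)
{
    struct slave s = { 0 };
    slave_open(&s, -1);
    slave_open(&s, 1);
    printf("phy id %d\n", s.phy ? s.phy->id : -1);
    return 0;
}
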
diff --git a/drivers/net/irda/vlsi_ir.c b/drivers/net/irda/vlsi_ir.c
index ffedad2a360a..15b920086251 100644
--- a/drivers/net/irda/vlsi_ir.c
+++ b/drivers/net/irda/vlsi_ir.c
@@ -418,8 +418,9 @@ static struct vlsi_ring *vlsi_alloc_ring(struct pci_dev *pdev, struct ring_descr
418 memset(rd, 0, sizeof(*rd)); 418 memset(rd, 0, sizeof(*rd));
419 rd->hw = hwmap + i; 419 rd->hw = hwmap + i;
420 rd->buf = kmalloc(len, GFP_KERNEL|GFP_DMA); 420 rd->buf = kmalloc(len, GFP_KERNEL|GFP_DMA);
421 if (rd->buf == NULL || 421 if (rd->buf)
422 !(busaddr = pci_map_single(pdev, rd->buf, len, dir))) { 422 busaddr = pci_map_single(pdev, rd->buf, len, dir);
423 if (rd->buf == NULL || pci_dma_mapping_error(pdev, busaddr)) {
423 if (rd->buf) { 424 if (rd->buf) {
424 net_err_ratelimited("%s: failed to create PCI-MAP for %p\n", 425 net_err_ratelimited("%s: failed to create PCI-MAP for %p\n",
425 __func__, rd->buf); 426 __func__, rd->buf);
@@ -430,8 +431,7 @@ static struct vlsi_ring *vlsi_alloc_ring(struct pci_dev *pdev, struct ring_descr
430 rd = r->rd + j; 431 rd = r->rd + j;
431 busaddr = rd_get_addr(rd); 432 busaddr = rd_get_addr(rd);
432 rd_set_addr_status(rd, 0, 0); 433 rd_set_addr_status(rd, 0, 0);
433 if (busaddr) 434 pci_unmap_single(pdev, busaddr, len, dir);
434 pci_unmap_single(pdev, busaddr, len, dir);
435 kfree(rd->buf); 435 kfree(rd->buf);
436 rd->buf = NULL; 436 rd->buf = NULL;
437 } 437 }
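
The vlsi_ir fix replaces the "bus address 0 means failure" test with pci_dma_mapping_error(): 0 can be a perfectly valid DMA address, and the API defines its own error check. An illustrative userspace model of the rule (map_single(), mapping_error() and MAP_FAILED_ADDR are invented for the demo, not the PCI DMA API):

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

/* Toy DMA mapping API where 0 can be a valid bus address, so failure
 * must be detected with a dedicated error check, never by comparing
 * the returned address against 0. */
typedef unsigned long busaddr_t;
#define MAP_FAILED_ADDR ((busaddr_t)-1)

static busaddr_t map_single(void *buf, size_t len)
{
    (void)len;
    return buf ? (busaddr_t)(uintptr_t)buf : MAP_FAILED_ADDR;
}

static int mapping_error(busaddr_t addr)
{
    return addr == MAP_FAILED_ADDR;
}

int main(void)
{
    size_t len = 64;
    void *buf = malloc(len);
    busaddr_t busaddr = MAP_FAILED_ADDR;

    if (buf)
        busaddr = map_single(buf, len);
    if (!buf || mapping_error(busaddr)) {   /* mirrors the hunk above */
        fprintf(stderr, "alloc or DMA map failed\n");
        free(buf);
        return 1;
    }
    printf("mapped at %#lx\n", busaddr);
    free(buf);
    return 0;
}
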
diff --git a/drivers/net/phy/mdio-boardinfo.c b/drivers/net/phy/mdio-boardinfo.c
index 6b988f77da08..61941e29daae 100644
--- a/drivers/net/phy/mdio-boardinfo.c
+++ b/drivers/net/phy/mdio-boardinfo.c
@@ -84,3 +84,4 @@ int mdiobus_register_board_info(const struct mdio_board_info *info,
84 84
85 return 0; 85 return 0;
86} 86}
87EXPORT_SYMBOL(mdiobus_register_board_info);
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 1be69d8bc909..a2bfc82e95d7 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -681,7 +681,7 @@ void phy_stop_machine(struct phy_device *phydev)
681 cancel_delayed_work_sync(&phydev->state_queue); 681 cancel_delayed_work_sync(&phydev->state_queue);
682 682
683 mutex_lock(&phydev->lock); 683 mutex_lock(&phydev->lock);
684 if (phydev->state > PHY_UP) 684 if (phydev->state > PHY_UP && phydev->state != PHY_HALTED)
685 phydev->state = PHY_UP; 685 phydev->state = PHY_UP;
686 mutex_unlock(&phydev->lock); 686 mutex_unlock(&phydev->lock);
687} 687}
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 1b52520715ae..f8c81f12d988 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -990,7 +990,7 @@ static void team_port_disable(struct team *team,
990#define TEAM_ENC_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \ 990#define TEAM_ENC_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \
991 NETIF_F_RXCSUM | NETIF_F_ALL_TSO) 991 NETIF_F_RXCSUM | NETIF_F_ALL_TSO)
992 992
993static void ___team_compute_features(struct team *team) 993static void __team_compute_features(struct team *team)
994{ 994{
995 struct team_port *port; 995 struct team_port *port;
996 u32 vlan_features = TEAM_VLAN_FEATURES & NETIF_F_ALL_FOR_ALL; 996 u32 vlan_features = TEAM_VLAN_FEATURES & NETIF_F_ALL_FOR_ALL;
@@ -1023,16 +1023,10 @@ static void ___team_compute_features(struct team *team)
1023 team->dev->priv_flags |= IFF_XMIT_DST_RELEASE; 1023 team->dev->priv_flags |= IFF_XMIT_DST_RELEASE;
1024} 1024}
1025 1025
1026static void __team_compute_features(struct team *team)
1027{
1028 ___team_compute_features(team);
1029 netdev_change_features(team->dev);
1030}
1031
1032static void team_compute_features(struct team *team) 1026static void team_compute_features(struct team *team)
1033{ 1027{
1034 mutex_lock(&team->lock); 1028 mutex_lock(&team->lock);
1035 ___team_compute_features(team); 1029 __team_compute_features(team);
1036 mutex_unlock(&team->lock); 1030 mutex_unlock(&team->lock);
1037 netdev_change_features(team->dev); 1031 netdev_change_features(team->dev);
1038} 1032}
@@ -1641,6 +1635,7 @@ static void team_uninit(struct net_device *dev)
1641 team_notify_peers_fini(team); 1635 team_notify_peers_fini(team);
1642 team_queue_override_fini(team); 1636 team_queue_override_fini(team);
1643 mutex_unlock(&team->lock); 1637 mutex_unlock(&team->lock);
1638 netdev_change_features(dev);
1644} 1639}
1645 1640
1646static void team_destructor(struct net_device *dev) 1641static void team_destructor(struct net_device *dev)
@@ -1928,6 +1923,10 @@ static int team_add_slave(struct net_device *dev, struct net_device *port_dev)
1928 mutex_lock(&team->lock); 1923 mutex_lock(&team->lock);
1929 err = team_port_add(team, port_dev); 1924 err = team_port_add(team, port_dev);
1930 mutex_unlock(&team->lock); 1925 mutex_unlock(&team->lock);
1926
1927 if (!err)
1928 netdev_change_features(dev);
1929
1931 return err; 1930 return err;
1932} 1931}
1933 1932
@@ -1939,6 +1938,10 @@ static int team_del_slave(struct net_device *dev, struct net_device *port_dev)
1939 mutex_lock(&team->lock); 1938 mutex_lock(&team->lock);
1940 err = team_port_del(team, port_dev); 1939 err = team_port_del(team, port_dev);
1941 mutex_unlock(&team->lock); 1940 mutex_unlock(&team->lock);
1941
1942 if (!err)
1943 netdev_change_features(dev);
1944
1942 return err; 1945 return err;
1943} 1946}
1944 1947
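
The team changes above move every netdev_change_features() call out from under team->lock; the features path can take other locks, and nesting it inside the team mutex risked deadlock. A compilable sketch of the resulting "do the locked work, drop the lock, then notify" shape, with a pthread mutex standing in for the kernel mutex:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t team_lock = PTHREAD_MUTEX_INITIALIZER;

static void recompute_features(void)
{
    /* must be called with team_lock held */
    puts("features recomputed");
}

static void notify_stack(void)
{
    /* stands in for netdev_change_features(): may take other
     * locks, so it must NOT run under team_lock */
    puts("stack notified");
}

/* Mirrors the team hunks: locked work first, unlock, then notify,
 * so lock ordering with the notifier path stays safe. */
static void compute_features(void)
{
    pthread_mutex_lock(&team_lock);
    recompute_features();
    pthread_mutex_unlock(&team_lock);
    notify_stack();
}

int main(void)
{
    compute_features();
    return 0;
}
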
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index f5552aaaa77a..f3ae88fdf332 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -532,6 +532,7 @@ static const struct driver_info wwan_info = {
532#define LENOVO_VENDOR_ID 0x17ef 532#define LENOVO_VENDOR_ID 0x17ef
533#define NVIDIA_VENDOR_ID 0x0955 533#define NVIDIA_VENDOR_ID 0x0955
534#define HP_VENDOR_ID 0x03f0 534#define HP_VENDOR_ID 0x03f0
535#define MICROSOFT_VENDOR_ID 0x045e
535 536
536static const struct usb_device_id products[] = { 537static const struct usb_device_id products[] = {
537/* BLACKLIST !! 538/* BLACKLIST !!
@@ -761,6 +762,20 @@ static const struct usb_device_id products[] = {
761 .driver_info = 0, 762 .driver_info = 0,
762}, 763},
763 764
765/* Microsoft Surface 2 dock (based on Realtek RTL8152) */
766{
767 USB_DEVICE_AND_INTERFACE_INFO(MICROSOFT_VENDOR_ID, 0x07ab, USB_CLASS_COMM,
768 USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
769 .driver_info = 0,
770},
771
772/* Microsoft Surface 3 dock (based on Realtek RTL8153) */
773{
774 USB_DEVICE_AND_INTERFACE_INFO(MICROSOFT_VENDOR_ID, 0x07c6, USB_CLASS_COMM,
775 USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
776 .driver_info = 0,
777},
778
764/* WHITELIST!!! 779/* WHITELIST!!!
765 * 780 *
766 * CDC Ether uses two interfaces, not necessarily consecutive. 781 * CDC Ether uses two interfaces, not necessarily consecutive.
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 156f7f85e486..2474618404f5 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -908,7 +908,7 @@ static const struct usb_device_id products[] = {
908 {QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */ 908 {QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */
909 {QMI_QUIRK_SET_DTR(0x1bc7, 0x1040, 2)}, /* Telit LE922A */ 909 {QMI_QUIRK_SET_DTR(0x1bc7, 0x1040, 2)}, /* Telit LE922A */
910 {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */ 910 {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */
911 {QMI_FIXED_INTF(0x1bc7, 0x1201, 2)}, /* Telit LE920 */ 911 {QMI_QUIRK_SET_DTR(0x1bc7, 0x1201, 2)}, /* Telit LE920, LE920A4 */
912 {QMI_FIXED_INTF(0x1c9e, 0x9b01, 3)}, /* XS Stick W100-2 from 4G Systems */ 912 {QMI_FIXED_INTF(0x1c9e, 0x9b01, 3)}, /* XS Stick W100-2 from 4G Systems */
913 {QMI_FIXED_INTF(0x0b3c, 0xc000, 4)}, /* Olivetti Olicard 100 */ 913 {QMI_FIXED_INTF(0x0b3c, 0xc000, 4)}, /* Olivetti Olicard 100 */
914 {QMI_FIXED_INTF(0x0b3c, 0xc001, 4)}, /* Olivetti Olicard 120 */ 914 {QMI_FIXED_INTF(0x0b3c, 0xc001, 4)}, /* Olivetti Olicard 120 */
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 0b1b9188625d..07f788c49d57 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -517,6 +517,7 @@ enum rtl8152_flags {
517 517
518/* Define these values to match your device */ 518/* Define these values to match your device */
519#define VENDOR_ID_REALTEK 0x0bda 519#define VENDOR_ID_REALTEK 0x0bda
520#define VENDOR_ID_MICROSOFT 0x045e
520#define VENDOR_ID_SAMSUNG 0x04e8 521#define VENDOR_ID_SAMSUNG 0x04e8
521#define VENDOR_ID_LENOVO 0x17ef 522#define VENDOR_ID_LENOVO 0x17ef
522#define VENDOR_ID_NVIDIA 0x0955 523#define VENDOR_ID_NVIDIA 0x0955
@@ -1294,6 +1295,7 @@ static void intr_callback(struct urb *urb)
1294 } 1295 }
1295 } else { 1296 } else {
1296 if (netif_carrier_ok(tp->netdev)) { 1297 if (netif_carrier_ok(tp->netdev)) {
1298 netif_stop_queue(tp->netdev);
1297 set_bit(RTL8152_LINK_CHG, &tp->flags); 1299 set_bit(RTL8152_LINK_CHG, &tp->flags);
1298 schedule_delayed_work(&tp->schedule, 0); 1300 schedule_delayed_work(&tp->schedule, 0);
1299 } 1301 }
@@ -3169,6 +3171,9 @@ static void set_carrier(struct r8152 *tp)
3169 napi_enable(&tp->napi); 3171 napi_enable(&tp->napi);
3170 netif_wake_queue(netdev); 3172 netif_wake_queue(netdev);
3171 netif_info(tp, link, netdev, "carrier on\n"); 3173 netif_info(tp, link, netdev, "carrier on\n");
3174 } else if (netif_queue_stopped(netdev) &&
3175 skb_queue_len(&tp->tx_queue) < tp->tx_qlen) {
3176 netif_wake_queue(netdev);
3172 } 3177 }
3173 } else { 3178 } else {
3174 if (netif_carrier_ok(netdev)) { 3179 if (netif_carrier_ok(netdev)) {
@@ -3702,8 +3707,18 @@ static int rtl8152_resume(struct usb_interface *intf)
3702 tp->rtl_ops.autosuspend_en(tp, false); 3707 tp->rtl_ops.autosuspend_en(tp, false);
3703 napi_disable(&tp->napi); 3708 napi_disable(&tp->napi);
3704 set_bit(WORK_ENABLE, &tp->flags); 3709 set_bit(WORK_ENABLE, &tp->flags);
3705 if (netif_carrier_ok(tp->netdev)) 3710
3706 rtl_start_rx(tp); 3711 if (netif_carrier_ok(tp->netdev)) {
3712 if (rtl8152_get_speed(tp) & LINK_STATUS) {
3713 rtl_start_rx(tp);
3714 } else {
3715 netif_carrier_off(tp->netdev);
3716 tp->rtl_ops.disable(tp);
3717 netif_info(tp, link, tp->netdev,
3718 "linking down\n");
3719 }
3720 }
3721
3707 napi_enable(&tp->napi); 3722 napi_enable(&tp->napi);
3708 clear_bit(SELECTIVE_SUSPEND, &tp->flags); 3723 clear_bit(SELECTIVE_SUSPEND, &tp->flags);
3709 smp_mb__after_atomic(); 3724 smp_mb__after_atomic();
@@ -4507,6 +4522,8 @@ static void rtl8152_disconnect(struct usb_interface *intf)
4507static struct usb_device_id rtl8152_table[] = { 4522static struct usb_device_id rtl8152_table[] = {
4508 {REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8152)}, 4523 {REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8152)},
4509 {REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8153)}, 4524 {REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8153)},
4525 {REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x07ab)},
4526 {REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x07c6)},
4510 {REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101)}, 4527 {REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101)},
4511 {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x304f)}, 4528 {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x304f)},
4512 {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3062)}, 4529 {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3062)},
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 3de65ea6531a..453244805c52 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -1929,7 +1929,7 @@ static int __usbnet_read_cmd(struct usbnet *dev, u8 cmd, u8 reqtype,
1929 " value=0x%04x index=0x%04x size=%d\n", 1929 " value=0x%04x index=0x%04x size=%d\n",
1930 cmd, reqtype, value, index, size); 1930 cmd, reqtype, value, index, size);
1931 1931
1932 if (data) { 1932 if (size) {
1933 buf = kmalloc(size, GFP_KERNEL); 1933 buf = kmalloc(size, GFP_KERNEL);
1934 if (!buf) 1934 if (!buf)
1935 goto out; 1935 goto out;
@@ -1938,8 +1938,13 @@ static int __usbnet_read_cmd(struct usbnet *dev, u8 cmd, u8 reqtype,
1938 err = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0), 1938 err = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
1939 cmd, reqtype, value, index, buf, size, 1939 cmd, reqtype, value, index, buf, size,
1940 USB_CTRL_GET_TIMEOUT); 1940 USB_CTRL_GET_TIMEOUT);
1941 if (err > 0 && err <= size) 1941 if (err > 0 && err <= size) {
1942 memcpy(data, buf, err); 1942 if (data)
1943 memcpy(data, buf, err);
1944 else
1945 netdev_dbg(dev->net,
1946 "Huh? Data requested but thrown away.\n");
1947 }
1943 kfree(buf); 1948 kfree(buf);
1944out: 1949out:
1945 return err; 1950 return err;
@@ -1960,7 +1965,13 @@ static int __usbnet_write_cmd(struct usbnet *dev, u8 cmd, u8 reqtype,
1960 buf = kmemdup(data, size, GFP_KERNEL); 1965 buf = kmemdup(data, size, GFP_KERNEL);
1961 if (!buf) 1966 if (!buf)
1962 goto out; 1967 goto out;
1963 } 1968 } else {
1969 if (size) {
1970 WARN_ON_ONCE(1);
1971 err = -EINVAL;
1972 goto out;
1973 }
1974 }
1964 1975
1965 err = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0), 1976 err = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
1966 cmd, reqtype, value, index, buf, size, 1977 cmd, reqtype, value, index, buf, size,
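
After the usbnet hunks, the transfer length rather than the caller's data pointer decides whether a bounce buffer is allocated: a read may legally discard the data (NULL pointer with non-zero size), while a write rejects that combination. A hedged userspace rendering of the rule (read_cmd() and write_cmd() are simplified stand-ins, not the usbnet API):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>

static int read_cmd(void *data, size_t size)
{
    char *buf = NULL;

    if (size) {                        /* size, not data, decides */
        buf = malloc(size);
        if (!buf)
            return -ENOMEM;
        memset(buf, 0xab, size);       /* pretend the device filled it */
    }
    if (data && buf)
        memcpy(data, buf, size);
    else if (size)
        fprintf(stderr, "data requested but thrown away\n");
    free(buf);
    return (int)size;
}

static int write_cmd(const void *data, size_t size)
{
    if (!data && size)
        return -EINVAL;                /* mirrors the WARN_ON_ONCE path */
    return (int)size;
}

int main(void)
{
    char out[4];
    printf("read  -> %d\n", read_cmd(out, sizeof(out)));
    printf("read  -> %d\n", read_cmd(NULL, 4));   /* discarded */
    printf("write -> %d\n", write_cmd(NULL, 4));  /* -EINVAL */
    return 0;
}
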
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index ea9890d61967..f36584616e7d 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -2230,14 +2230,8 @@ static bool virtnet_validate_features(struct virtio_device *vdev)
2230#define MIN_MTU ETH_MIN_MTU 2230#define MIN_MTU ETH_MIN_MTU
2231#define MAX_MTU ETH_MAX_MTU 2231#define MAX_MTU ETH_MAX_MTU
2232 2232
2233static int virtnet_probe(struct virtio_device *vdev) 2233static int virtnet_validate(struct virtio_device *vdev)
2234{ 2234{
2235 int i, err;
2236 struct net_device *dev;
2237 struct virtnet_info *vi;
2238 u16 max_queue_pairs;
2239 int mtu;
2240
2241 if (!vdev->config->get) { 2235 if (!vdev->config->get) {
2242 dev_err(&vdev->dev, "%s failure: config access disabled\n", 2236 dev_err(&vdev->dev, "%s failure: config access disabled\n",
2243 __func__); 2237 __func__);
@@ -2247,6 +2241,25 @@ static int virtnet_probe(struct virtio_device *vdev)
2247 if (!virtnet_validate_features(vdev)) 2241 if (!virtnet_validate_features(vdev))
2248 return -EINVAL; 2242 return -EINVAL;
2249 2243
2244 if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) {
2245 int mtu = virtio_cread16(vdev,
2246 offsetof(struct virtio_net_config,
2247 mtu));
2248 if (mtu < MIN_MTU)
2249 __virtio_clear_bit(vdev, VIRTIO_NET_F_MTU);
2250 }
2251
2252 return 0;
2253}
2254
2255static int virtnet_probe(struct virtio_device *vdev)
2256{
2257 int i, err;
2258 struct net_device *dev;
2259 struct virtnet_info *vi;
2260 u16 max_queue_pairs;
2261 int mtu;
2262
2250 /* Find if host supports multiqueue virtio_net device */ 2263 /* Find if host supports multiqueue virtio_net device */
2251 err = virtio_cread_feature(vdev, VIRTIO_NET_F_MQ, 2264 err = virtio_cread_feature(vdev, VIRTIO_NET_F_MQ,
2252 struct virtio_net_config, 2265 struct virtio_net_config,
@@ -2362,11 +2375,20 @@ static int virtnet_probe(struct virtio_device *vdev)
2362 offsetof(struct virtio_net_config, 2375 offsetof(struct virtio_net_config,
2363 mtu)); 2376 mtu));
2364 if (mtu < dev->min_mtu) { 2377 if (mtu < dev->min_mtu) {
2365 __virtio_clear_bit(vdev, VIRTIO_NET_F_MTU); 2378 /* Should never trigger: MTU was previously validated
2366 } else { 2379 * in virtnet_validate.
2367 dev->mtu = mtu; 2380 */
 2368 dev->max_mtu = mtu; 2381 dev_err(&vdev->dev, "device MTU appears to have changed: "
2382 "it is now %d < %d", mtu, dev->min_mtu);
2383 goto free_stats;
2369 } 2384 }
2385
2386 dev->mtu = mtu;
2387 dev->max_mtu = mtu;
2388
2389 /* TODO: size buffers correctly in this case. */
2390 if (dev->mtu > ETH_DATA_LEN)
2391 vi->big_packets = true;
2370 } 2392 }
2371 2393
2372 if (vi->any_header_sg) 2394 if (vi->any_header_sg)
@@ -2544,6 +2566,7 @@ static struct virtio_driver virtio_net_driver = {
2544 .driver.name = KBUILD_MODNAME, 2566 .driver.name = KBUILD_MODNAME,
2545 .driver.owner = THIS_MODULE, 2567 .driver.owner = THIS_MODULE,
2546 .id_table = id_table, 2568 .id_table = id_table,
2569 .validate = virtnet_validate,
2547 .probe = virtnet_probe, 2570 .probe = virtnet_probe,
2548 .remove = virtnet_remove, 2571 .remove = virtnet_remove,
2549 .config_changed = virtnet_config_changed, 2572 .config_changed = virtnet_config_changed,
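
The virtio_net patch above introduces a .validate callback that runs before feature negotiation and withdraws VIRTIO_NET_F_MTU when the advertised MTU is below the minimum, so probe can then treat the feature as trustworthy. A toy model of the two-stage flow (struct vdev and the helper names are invented for illustration):

#include <stdio.h>
#include <stdbool.h>

#define MIN_MTU 68

struct vdev { bool has_mtu_feature; int advertised_mtu; };

/* Runs before feature negotiation, like the new virtnet_validate():
 * if the device advertises a bogus MTU, withdraw the MTU feature
 * instead of failing probe later. */
static int validate(struct vdev *vdev)
{
    if (vdev->has_mtu_feature && vdev->advertised_mtu < MIN_MTU)
        vdev->has_mtu_feature = false;  /* like __virtio_clear_bit() */
    return 0;
}

static int probe(struct vdev *vdev)
{
    if (vdev->has_mtu_feature && vdev->advertised_mtu < MIN_MTU) {
        /* should never trigger: validate() already ran */
        fprintf(stderr, "device MTU changed under us\n");
        return -1;
    }
    printf("probed, mtu feature %s\n",
           vdev->has_mtu_feature ? "on" : "off");
    return 0;
}

int main(void)
{
    struct vdev bad = { .has_mtu_feature = true, .advertised_mtu = 20 };
    validate(&bad);
    return probe(&bad);
}
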
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
index de19c7c92bc6..85d949e03f79 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
@@ -2238,14 +2238,16 @@ int brcmf_p2p_del_vif(struct wiphy *wiphy, struct wireless_dev *wdev)
2238 struct brcmf_cfg80211_info *cfg = wiphy_priv(wiphy); 2238 struct brcmf_cfg80211_info *cfg = wiphy_priv(wiphy);
2239 struct brcmf_p2p_info *p2p = &cfg->p2p; 2239 struct brcmf_p2p_info *p2p = &cfg->p2p;
2240 struct brcmf_cfg80211_vif *vif; 2240 struct brcmf_cfg80211_vif *vif;
2241 enum nl80211_iftype iftype;
2241 bool wait_for_disable = false; 2242 bool wait_for_disable = false;
2242 int err; 2243 int err;
2243 2244
2244 brcmf_dbg(TRACE, "delete P2P vif\n"); 2245 brcmf_dbg(TRACE, "delete P2P vif\n");
2245 vif = container_of(wdev, struct brcmf_cfg80211_vif, wdev); 2246 vif = container_of(wdev, struct brcmf_cfg80211_vif, wdev);
2246 2247
2248 iftype = vif->wdev.iftype;
2247 brcmf_cfg80211_arm_vif_event(cfg, vif); 2249 brcmf_cfg80211_arm_vif_event(cfg, vif);
2248 switch (vif->wdev.iftype) { 2250 switch (iftype) {
2249 case NL80211_IFTYPE_P2P_CLIENT: 2251 case NL80211_IFTYPE_P2P_CLIENT:
2250 if (test_bit(BRCMF_VIF_STATUS_DISCONNECTING, &vif->sme_state)) 2252 if (test_bit(BRCMF_VIF_STATUS_DISCONNECTING, &vif->sme_state))
2251 wait_for_disable = true; 2253 wait_for_disable = true;
@@ -2275,7 +2277,7 @@ int brcmf_p2p_del_vif(struct wiphy *wiphy, struct wireless_dev *wdev)
2275 BRCMF_P2P_DISABLE_TIMEOUT); 2277 BRCMF_P2P_DISABLE_TIMEOUT);
2276 2278
2277 err = 0; 2279 err = 0;
2278 if (vif->wdev.iftype != NL80211_IFTYPE_P2P_DEVICE) { 2280 if (iftype != NL80211_IFTYPE_P2P_DEVICE) {
2279 brcmf_vif_clear_mgmt_ies(vif); 2281 brcmf_vif_clear_mgmt_ies(vif);
2280 err = brcmf_p2p_release_p2p_if(vif); 2282 err = brcmf_p2p_release_p2p_if(vif);
2281 } 2283 }
@@ -2291,7 +2293,7 @@ int brcmf_p2p_del_vif(struct wiphy *wiphy, struct wireless_dev *wdev)
2291 brcmf_remove_interface(vif->ifp, true); 2293 brcmf_remove_interface(vif->ifp, true);
2292 2294
2293 brcmf_cfg80211_arm_vif_event(cfg, NULL); 2295 brcmf_cfg80211_arm_vif_event(cfg, NULL);
2294 if (vif->wdev.iftype != NL80211_IFTYPE_P2P_DEVICE) 2296 if (iftype != NL80211_IFTYPE_P2P_DEVICE)
2295 p2p->bss_idx[P2PAPI_BSSCFG_CONNECTION].vif = NULL; 2297 p2p->bss_idx[P2PAPI_BSSCFG_CONNECTION].vif = NULL;
2296 2298
2297 return err; 2299 return err;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
index a260cd503200..077bfd8f4c0c 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
@@ -1056,6 +1056,8 @@ static ssize_t iwl_dbgfs_fw_dbg_collect_write(struct iwl_mvm *mvm,
1056 1056
1057 if (ret) 1057 if (ret)
1058 return ret; 1058 return ret;
1059 if (count == 0)
1060 return 0;
1059 1061
1060 iwl_mvm_fw_dbg_collect(mvm, FW_DBG_TRIGGER_USER, buf, 1062 iwl_mvm_fw_dbg_collect(mvm, FW_DBG_TRIGGER_USER, buf,
1061 (count - 1), NULL); 1063 (count - 1), NULL);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
index 99132ea16ede..c5734e1a02d2 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
@@ -216,7 +216,8 @@ u32 iwl_mvm_mac_get_queues_mask(struct ieee80211_vif *vif)
216 qmask |= BIT(vif->hw_queue[ac]); 216 qmask |= BIT(vif->hw_queue[ac]);
217 } 217 }
218 218
219 if (vif->type == NL80211_IFTYPE_AP) 219 if (vif->type == NL80211_IFTYPE_AP ||
220 vif->type == NL80211_IFTYPE_ADHOC)
220 qmask |= BIT(vif->cab_queue); 221 qmask |= BIT(vif->cab_queue);
221 222
222 return qmask; 223 return qmask;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 6927caecd48e..486dcceed17a 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -2401,7 +2401,7 @@ void iwl_mvm_sta_pm_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
2401 return; 2401 return;
2402 2402
2403 rcu_read_lock(); 2403 rcu_read_lock();
2404 sta = mvm->fw_id_to_mac_id[notif->sta_id]; 2404 sta = rcu_dereference(mvm->fw_id_to_mac_id[notif->sta_id]);
2405 if (WARN_ON(IS_ERR_OR_NULL(sta))) { 2405 if (WARN_ON(IS_ERR_OR_NULL(sta))) {
2406 rcu_read_unlock(); 2406 rcu_read_unlock();
2407 return; 2407 return;
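
The one-line iwlwifi fix reads fw_id_to_mac_id[] through rcu_dereference() instead of a plain load; inside an RCU read-side section the pointer must be fetched with the proper ordering primitive. Userspace has no RCU, but C11 release/acquire atomics can approximate the publish/consume discipline, so the sketch below is an analogy, not the kernel mechanism:

#include <stdio.h>
#include <stdatomic.h>

struct sta { int id; };

/* RCU-protected slot: writers publish with rcu_assign_pointer() and
 * readers fetch with rcu_dereference().  Here C11 atomics stand in;
 * the point is that a plain load (the pre-fix code) carries no such
 * ordering guarantee under concurrent updates. */
static _Atomic(struct sta *) fw_id_to_sta[8];

static void publish(int idx, struct sta *sta)
{
    atomic_store_explicit(&fw_id_to_sta[idx], sta,
                          memory_order_release);
}

static void notif_handler(int idx)
{
    struct sta *sta = atomic_load_explicit(&fw_id_to_sta[idx],
                                           memory_order_acquire);
    if (!sta)
        return;
    printf("sta %d power-save notification\n", sta->id);
}

int main(void)
{
    static struct sta s = { .id = 7 };
    publish(3, &s);
    notif_handler(3);
    return 0;
}
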
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index b51a2853cc80..9d28db7f56aa 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -1806,7 +1806,8 @@ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1806 iwl_mvm_get_wd_timeout(mvm, vif, false, false); 1806 iwl_mvm_get_wd_timeout(mvm, vif, false, false);
1807 int queue; 1807 int queue;
1808 1808
1809 if (vif->type == NL80211_IFTYPE_AP) 1809 if (vif->type == NL80211_IFTYPE_AP ||
1810 vif->type == NL80211_IFTYPE_ADHOC)
1810 queue = IWL_MVM_DQA_AP_PROBE_RESP_QUEUE; 1811 queue = IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
1811 else if (vif->type == NL80211_IFTYPE_P2P_DEVICE) 1812 else if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
1812 queue = IWL_MVM_DQA_P2P_DEVICE_QUEUE; 1813 queue = IWL_MVM_DQA_P2P_DEVICE_QUEUE;
@@ -1837,7 +1838,8 @@ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1837 * enabled-cab_queue to the mask) 1838 * enabled-cab_queue to the mask)
1838 */ 1839 */
1839 if (iwl_mvm_is_dqa_supported(mvm) && 1840 if (iwl_mvm_is_dqa_supported(mvm) &&
1840 vif->type == NL80211_IFTYPE_AP) { 1841 (vif->type == NL80211_IFTYPE_AP ||
1842 vif->type == NL80211_IFTYPE_ADHOC)) {
1841 struct iwl_trans_txq_scd_cfg cfg = { 1843 struct iwl_trans_txq_scd_cfg cfg = {
1842 .fifo = IWL_MVM_TX_FIFO_MCAST, 1844 .fifo = IWL_MVM_TX_FIFO_MCAST,
1843 .sta_id = mvmvif->bcast_sta.sta_id, 1845 .sta_id = mvmvif->bcast_sta.sta_id,
@@ -1862,7 +1864,8 @@ static void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm,
1862 1864
1863 lockdep_assert_held(&mvm->mutex); 1865 lockdep_assert_held(&mvm->mutex);
1864 1866
1865 if (vif->type == NL80211_IFTYPE_AP) 1867 if (vif->type == NL80211_IFTYPE_AP ||
1868 vif->type == NL80211_IFTYPE_ADHOC)
1866 iwl_mvm_disable_txq(mvm, vif->cab_queue, vif->cab_queue, 1869 iwl_mvm_disable_txq(mvm, vif->cab_queue, vif->cab_queue,
1867 IWL_MAX_TID_COUNT, 0); 1870 IWL_MAX_TID_COUNT, 0);
1868 1871
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index 3f37075f4cde..1ba0a6f55503 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -506,6 +506,7 @@ static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm,
506 506
507 switch (info->control.vif->type) { 507 switch (info->control.vif->type) {
508 case NL80211_IFTYPE_AP: 508 case NL80211_IFTYPE_AP:
509 case NL80211_IFTYPE_ADHOC:
509 /* 510 /*
510 * Handle legacy hostapd as well, where station may be added 511 * Handle legacy hostapd as well, where station may be added
511 * only after assoc. Take care of the case where we send a 512 * only after assoc. Take care of the case where we send a
@@ -517,7 +518,8 @@ static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm,
517 if (info->hw_queue == info->control.vif->cab_queue) 518 if (info->hw_queue == info->control.vif->cab_queue)
518 return info->hw_queue; 519 return info->hw_queue;
519 520
520 WARN_ONCE(1, "fc=0x%02x", le16_to_cpu(fc)); 521 WARN_ONCE(info->control.vif->type != NL80211_IFTYPE_ADHOC,
522 "fc=0x%02x", le16_to_cpu(fc));
521 return IWL_MVM_DQA_AP_PROBE_RESP_QUEUE; 523 return IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
522 case NL80211_IFTYPE_P2P_DEVICE: 524 case NL80211_IFTYPE_P2P_DEVICE:
523 if (ieee80211_is_mgmt(fc)) 525 if (ieee80211_is_mgmt(fc))
@@ -584,7 +586,8 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
584 iwl_mvm_vif_from_mac80211(info.control.vif); 586 iwl_mvm_vif_from_mac80211(info.control.vif);
585 587
586 if (info.control.vif->type == NL80211_IFTYPE_P2P_DEVICE || 588 if (info.control.vif->type == NL80211_IFTYPE_P2P_DEVICE ||
587 info.control.vif->type == NL80211_IFTYPE_AP) { 589 info.control.vif->type == NL80211_IFTYPE_AP ||
590 info.control.vif->type == NL80211_IFTYPE_ADHOC) {
588 sta_id = mvmvif->bcast_sta.sta_id; 591 sta_id = mvmvif->bcast_sta.sta_id;
589 queue = iwl_mvm_get_ctrl_vif_queue(mvm, &info, 592 queue = iwl_mvm_get_ctrl_vif_queue(mvm, &info,
590 hdr->frame_control); 593 hdr->frame_control);
diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c
index caea350f05aa..bdc379178e87 100644
--- a/drivers/net/wireless/realtek/rtlwifi/base.c
+++ b/drivers/net/wireless/realtek/rtlwifi/base.c
@@ -1742,12 +1742,14 @@ void rtl_c2hcmd_enqueue(struct ieee80211_hw *hw, u8 tag, u8 len, u8 *val)
1742 unsigned long flags; 1742 unsigned long flags;
1743 struct rtl_c2hcmd *c2hcmd; 1743 struct rtl_c2hcmd *c2hcmd;
1744 1744
1745 c2hcmd = kmalloc(sizeof(*c2hcmd), GFP_KERNEL); 1745 c2hcmd = kmalloc(sizeof(*c2hcmd),
1746 in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
1746 1747
1747 if (!c2hcmd) 1748 if (!c2hcmd)
1748 goto label_err; 1749 goto label_err;
1749 1750
1750 c2hcmd->val = kmalloc(len, GFP_KERNEL); 1751 c2hcmd->val = kmalloc(len,
1752 in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
1751 1753
1752 if (!c2hcmd->val) 1754 if (!c2hcmd->val)
1753 goto label_err2; 1755 goto label_err2;
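
The rtlwifi change picks the allocation flag based on context: code that may run in interrupt context cannot use GFP_KERNEL, which is allowed to sleep on memory reclaim, so it falls back to GFP_ATOMIC there. A toy model of that rule (enum gfp, my_kmalloc() and the in_interrupt flag are stand-ins, not the kernel API):

#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

enum gfp { GFP_KERNEL, GFP_ATOMIC };

static void *my_kmalloc(size_t len, enum gfp flags)
{
    if (flags == GFP_KERNEL) {
        /* a GFP_KERNEL allocation may sleep / trigger reclaim here,
         * which is illegal in interrupt context */
    }
    return malloc(len);
}

static void enqueue(size_t len, bool in_interrupt)
{
    void *p = my_kmalloc(len, in_interrupt ? GFP_ATOMIC : GFP_KERNEL);

    printf("alloc with %s: %s\n",
           in_interrupt ? "GFP_ATOMIC" : "GFP_KERNEL",
           p ? "ok" : "failed");
    free(p);
}

int main(void)
{
    enqueue(64, false);  /* process context */
    enqueue(64, true);   /* interrupt context */
    return 0;
}
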
diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
index 23d4a1728cdf..351bac8f6503 100644
--- a/drivers/nvdimm/bus.c
+++ b/drivers/nvdimm/bus.c
@@ -934,8 +934,14 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
934 rc = nd_desc->ndctl(nd_desc, nvdimm, cmd, buf, buf_len, NULL); 934 rc = nd_desc->ndctl(nd_desc, nvdimm, cmd, buf, buf_len, NULL);
935 if (rc < 0) 935 if (rc < 0)
936 goto out_unlock; 936 goto out_unlock;
937 nvdimm_bus_unlock(&nvdimm_bus->dev);
938
937 if (copy_to_user(p, buf, buf_len)) 939 if (copy_to_user(p, buf, buf_len))
938 rc = -EFAULT; 940 rc = -EFAULT;
941
942 vfree(buf);
943 return rc;
944
939 out_unlock: 945 out_unlock:
940 nvdimm_bus_unlock(&nvdimm_bus->dev); 946 nvdimm_bus_unlock(&nvdimm_bus->dev);
941 out: 947 out:
diff --git a/drivers/nvdimm/claim.c b/drivers/nvdimm/claim.c
index b3323c0697f6..ca6d572c48fc 100644
--- a/drivers/nvdimm/claim.c
+++ b/drivers/nvdimm/claim.c
@@ -243,7 +243,15 @@ static int nsio_rw_bytes(struct nd_namespace_common *ndns,
243 } 243 }
244 244
245 if (unlikely(is_bad_pmem(&nsio->bb, sector, sz_align))) { 245 if (unlikely(is_bad_pmem(&nsio->bb, sector, sz_align))) {
246 if (IS_ALIGNED(offset, 512) && IS_ALIGNED(size, 512)) { 246 /*
247 * FIXME: nsio_rw_bytes() may be called from atomic
248 * context in the btt case and nvdimm_clear_poison()
249 * takes a sleeping lock. Until the locking can be
 250 * reworked, this capability requires that the namespace
251 * is not claimed by btt.
252 */
253 if (IS_ALIGNED(offset, 512) && IS_ALIGNED(size, 512)
254 && (!ndns->claim || !is_nd_btt(ndns->claim))) {
247 long cleared; 255 long cleared;
248 256
249 cleared = nvdimm_clear_poison(&ndns->dev, offset, size); 257 cleared = nvdimm_clear_poison(&ndns->dev, offset, size);
diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c
index 0eedc49e0d47..8b721321be5b 100644
--- a/drivers/nvdimm/dimm_devs.c
+++ b/drivers/nvdimm/dimm_devs.c
@@ -395,7 +395,7 @@ EXPORT_SYMBOL_GPL(nvdimm_create);
395 395
396int alias_dpa_busy(struct device *dev, void *data) 396int alias_dpa_busy(struct device *dev, void *data)
397{ 397{
398 resource_size_t map_end, blk_start, new, busy; 398 resource_size_t map_end, blk_start, new;
399 struct blk_alloc_info *info = data; 399 struct blk_alloc_info *info = data;
400 struct nd_mapping *nd_mapping; 400 struct nd_mapping *nd_mapping;
401 struct nd_region *nd_region; 401 struct nd_region *nd_region;
@@ -436,29 +436,19 @@ int alias_dpa_busy(struct device *dev, void *data)
436 retry: 436 retry:
437 /* 437 /*
438 * Find the free dpa from the end of the last pmem allocation to 438 * Find the free dpa from the end of the last pmem allocation to
439 * the end of the interleave-set mapping that is not already 439 * the end of the interleave-set mapping.
440 * covered by a blk allocation.
441 */ 440 */
442 busy = 0;
443 for_each_dpa_resource(ndd, res) { 441 for_each_dpa_resource(ndd, res) {
442 if (strncmp(res->name, "pmem", 4) != 0)
443 continue;
444 if ((res->start >= blk_start && res->start < map_end) 444 if ((res->start >= blk_start && res->start < map_end)
445 || (res->end >= blk_start 445 || (res->end >= blk_start
446 && res->end <= map_end)) { 446 && res->end <= map_end)) {
447 if (strncmp(res->name, "pmem", 4) == 0) { 447 new = max(blk_start, min(map_end + 1, res->end + 1));
448 new = max(blk_start, min(map_end + 1, 448 if (new != blk_start) {
449 res->end + 1)); 449 blk_start = new;
450 if (new != blk_start) { 450 goto retry;
451 blk_start = new; 451 }
452 goto retry;
453 }
454 } else
455 busy += min(map_end, res->end)
456 - max(nd_mapping->start, res->start) + 1;
457 } else if (nd_mapping->start > res->start
458 && map_end < res->end) {
459 /* total eclipse of the PMEM region mapping */
460 busy += nd_mapping->size;
461 break;
462 } 452 }
463 } 453 }
464 454
@@ -470,52 +460,11 @@ int alias_dpa_busy(struct device *dev, void *data)
470 return 1; 460 return 1;
471 } 461 }
472 462
473 info->available -= blk_start - nd_mapping->start + busy; 463 info->available -= blk_start - nd_mapping->start;
474 464
475 return 0; 465 return 0;
476} 466}
477 467
478static int blk_dpa_busy(struct device *dev, void *data)
479{
480 struct blk_alloc_info *info = data;
481 struct nd_mapping *nd_mapping;
482 struct nd_region *nd_region;
483 resource_size_t map_end;
484 int i;
485
486 if (!is_nd_pmem(dev))
487 return 0;
488
489 nd_region = to_nd_region(dev);
490 for (i = 0; i < nd_region->ndr_mappings; i++) {
491 nd_mapping = &nd_region->mapping[i];
492 if (nd_mapping->nvdimm == info->nd_mapping->nvdimm)
493 break;
494 }
495
496 if (i >= nd_region->ndr_mappings)
497 return 0;
498
499 map_end = nd_mapping->start + nd_mapping->size - 1;
500 if (info->res->start >= nd_mapping->start
501 && info->res->start < map_end) {
502 if (info->res->end <= map_end) {
503 info->busy = 0;
504 return 1;
505 } else {
506 info->busy -= info->res->end - map_end;
507 return 0;
508 }
509 } else if (info->res->end >= nd_mapping->start
510 && info->res->end <= map_end) {
511 info->busy -= nd_mapping->start - info->res->start;
512 return 0;
513 } else {
514 info->busy -= nd_mapping->size;
515 return 0;
516 }
517}
518
519/** 468/**
520 * nd_blk_available_dpa - account the unused dpa of BLK region 469 * nd_blk_available_dpa - account the unused dpa of BLK region
521 * @nd_mapping: container of dpa-resource-root + labels 470 * @nd_mapping: container of dpa-resource-root + labels
@@ -545,11 +494,7 @@ resource_size_t nd_blk_available_dpa(struct nd_region *nd_region)
545 for_each_dpa_resource(ndd, res) { 494 for_each_dpa_resource(ndd, res) {
546 if (strncmp(res->name, "blk", 3) != 0) 495 if (strncmp(res->name, "blk", 3) != 0)
547 continue; 496 continue;
548 497 info.available -= resource_size(res);
549 info.res = res;
550 info.busy = resource_size(res);
551 device_for_each_child(&nvdimm_bus->dev, &info, blk_dpa_busy);
552 info.available -= info.busy;
553 } 498 }
554 499
555 return info.available; 500 return info.available;
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 9b3b57fef446..9583a5f58a1d 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -270,7 +270,7 @@ static inline int nvme_setup_discard(struct nvme_ns *ns, struct request *req,
270 memset(cmnd, 0, sizeof(*cmnd)); 270 memset(cmnd, 0, sizeof(*cmnd));
271 cmnd->dsm.opcode = nvme_cmd_dsm; 271 cmnd->dsm.opcode = nvme_cmd_dsm;
272 cmnd->dsm.nsid = cpu_to_le32(ns->ns_id); 272 cmnd->dsm.nsid = cpu_to_le32(ns->ns_id);
273 cmnd->dsm.nr = segments - 1; 273 cmnd->dsm.nr = cpu_to_le32(segments - 1);
274 cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD); 274 cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);
275 275
276 req->special_vec.bv_page = virt_to_page(range); 276 req->special_vec.bv_page = virt_to_page(range);
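
The one-line NVMe fix wraps the DSM range count in cpu_to_le32(): command fields are little-endian on the wire, and a raw store is only accidentally correct on little-endian hosts. A self-contained demonstration with an explicit little-endian store (cpu_to_le32_demo() is a userspace re-implementation written for this demo):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Store a CPU value as little-endian bytes whatever the host
 * endianness; this is what cpu_to_le32() guarantees. */
static uint32_t cpu_to_le32_demo(uint32_t v)
{
    uint8_t b[4] = { v & 0xff, (v >> 8) & 0xff,
                     (v >> 16) & 0xff, (v >> 24) & 0xff };
    uint32_t out;

    memcpy(&out, b, sizeof(out));
    return out;
}

int main(void)
{
    uint32_t segments = 5;
    uint32_t wire = cpu_to_le32_demo(segments - 1); /* 0's-based count */
    const uint8_t *p = (const uint8_t *)&wire;

    /* The first byte on the wire must be 0x04 on any host. */
    printf("wire bytes: %02x %02x %02x %02x\n", p[0], p[1], p[2], p[3]);
    return 0;
}
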
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 9690beb15e69..d996ca73d3be 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -2023,7 +2023,7 @@ nvme_fc_configure_admin_queue(struct nvme_fc_ctrl *ctrl)
2023 } 2023 }
2024 2024
2025 ctrl->ctrl.sqsize = 2025 ctrl->ctrl.sqsize =
2026 min_t(int, NVME_CAP_MQES(ctrl->cap) + 1, ctrl->ctrl.sqsize); 2026 min_t(int, NVME_CAP_MQES(ctrl->cap), ctrl->ctrl.sqsize);
2027 2027
2028 error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap); 2028 error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap);
2029 if (error) 2029 if (error)
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 47a479f26e5d..16f84eb0b95e 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -1606,7 +1606,7 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl)
1606 } 1606 }
1607 1607
1608 ctrl->ctrl.sqsize = 1608 ctrl->ctrl.sqsize =
1609 min_t(int, NVME_CAP_MQES(ctrl->cap) + 1, ctrl->ctrl.sqsize); 1609 min_t(int, NVME_CAP_MQES(ctrl->cap), ctrl->ctrl.sqsize);
1610 1610
1611 error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap); 1611 error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap);
1612 if (error) 1612 if (error)
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index a7bcff45f437..76450b0c55f1 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -100,7 +100,7 @@ static u16 nvmet_get_smart_log(struct nvmet_req *req,
100 u16 status; 100 u16 status;
101 101
102 WARN_ON(req == NULL || slog == NULL); 102 WARN_ON(req == NULL || slog == NULL);
103 if (req->cmd->get_log_page.nsid == 0xFFFFFFFF) 103 if (req->cmd->get_log_page.nsid == cpu_to_le32(0xFFFFFFFF))
104 status = nvmet_get_smart_log_all(req, slog); 104 status = nvmet_get_smart_log_all(req, slog);
105 else 105 else
106 status = nvmet_get_smart_log_nsid(req, slog); 106 status = nvmet_get_smart_log_nsid(req, slog);
diff --git a/drivers/nvme/target/io-cmd.c b/drivers/nvme/target/io-cmd.c
index 4195115c7e54..6b0baa9caab9 100644
--- a/drivers/nvme/target/io-cmd.c
+++ b/drivers/nvme/target/io-cmd.c
@@ -180,7 +180,7 @@ static void nvmet_execute_write_zeroes(struct nvmet_req *req)
180 180
181 sector = le64_to_cpu(write_zeroes->slba) << 181 sector = le64_to_cpu(write_zeroes->slba) <<
182 (req->ns->blksize_shift - 9); 182 (req->ns->blksize_shift - 9);
183 nr_sector = (((sector_t)le32_to_cpu(write_zeroes->length)) << 183 nr_sector = (((sector_t)le16_to_cpu(write_zeroes->length)) <<
184 (req->ns->blksize_shift - 9)) + 1; 184 (req->ns->blksize_shift - 9)) + 1;
185 185
186 if (__blkdev_issue_zeroout(req->ns->bdev, sector, nr_sector, 186 if (__blkdev_issue_zeroout(req->ns->bdev, sector, nr_sector,
@@ -230,7 +230,7 @@ int nvmet_parse_io_cmd(struct nvmet_req *req)
230 return 0; 230 return 0;
231 case nvme_cmd_dsm: 231 case nvme_cmd_dsm:
232 req->execute = nvmet_execute_dsm; 232 req->execute = nvmet_execute_dsm;
233 req->data_len = le32_to_cpu(cmd->dsm.nr + 1) * 233 req->data_len = (le32_to_cpu(cmd->dsm.nr) + 1) *
234 sizeof(struct nvme_dsm_range); 234 sizeof(struct nvme_dsm_range);
235 return 0; 235 return 0;
236 case nvme_cmd_write_zeroes: 236 case nvme_cmd_write_zeroes:
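
The nvmet parenthesization fix is subtle: le32_to_cpu(cmd->dsm.nr + 1) increments the raw little-endian representation before the byte swap, which yields garbage on big-endian hosts; the "+ 1" belongs outside the conversion. A runnable demonstration that simulates the big-endian view explicitly (swap32() stands in for the byte swap):

#include <stdio.h>
#include <stdint.h>

/* Simulate how a big-endian CPU sees a little-endian wire field. */
static uint32_t swap32(uint32_t v)
{
    return (v >> 24) | ((v >> 8) & 0xff00) |
           ((v << 8) & 0xff0000) | (v << 24);
}

int main(void)
{
    uint32_t nr = 1;               /* device sent nr = 1 (LE wire) */
    uint32_t nr_wire = swap32(nr); /* the raw u32 a BE host reads */

    /* fixed form: convert first, then add */
    printf("correct: %u ranges\n", swap32(nr_wire) + 1);   /* 2 */
    /* buggy form le32_to_cpu(nr + 1): adds to the raw bytes first */
    printf("buggy:   %u ranges\n", swap32(nr_wire + 1));   /* 16777217 */
    return 0;
}
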
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index 22f7bc6bac7f..c7b0b6a52708 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -392,7 +392,7 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
392 } 392 }
393 393
394 ctrl->ctrl.sqsize = 394 ctrl->ctrl.sqsize =
395 min_t(int, NVME_CAP_MQES(ctrl->cap) + 1, ctrl->ctrl.sqsize); 395 min_t(int, NVME_CAP_MQES(ctrl->cap), ctrl->ctrl.sqsize);
396 396
397 error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap); 397 error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap);
398 if (error) 398 if (error)
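
The same off-by-one is fixed in the FC, RDMA, and loop transports above: CAP.MQES and sqsize are both 0's-based (a value of N means N + 1 entries), so clamping against MQES + 1 permitted a queue one entry deeper than the controller advertises. The arithmetic, worked as a runnable sketch:

#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
    int mqes = 31;          /* controller supports 32 entries, 0-based */
    int want_sqsize = 127;  /* driver default: 128 entries, 0-based */

    int buggy = MIN(mqes + 1, want_sqsize);  /* 32 -> 33 entries! */
    int fixed = MIN(mqes, want_sqsize);      /* 31 -> 32 entries  */

    printf("buggy sqsize %d (%d entries), fixed sqsize %d (%d entries)\n",
           buggy, buggy + 1, fixed, fixed + 1);
    return 0;
}
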
diff --git a/drivers/pci/dwc/Kconfig b/drivers/pci/dwc/Kconfig
index dfb8a69afc28..d2d2ba5b8a68 100644
--- a/drivers/pci/dwc/Kconfig
+++ b/drivers/pci/dwc/Kconfig
@@ -89,6 +89,7 @@ config PCI_HISI
89 depends on PCI_MSI_IRQ_DOMAIN 89 depends on PCI_MSI_IRQ_DOMAIN
90 select PCIEPORTBUS 90 select PCIEPORTBUS
91 select PCIE_DW_HOST 91 select PCIE_DW_HOST
92 select PCI_HOST_COMMON
92 help 93 help
93 Say Y here if you want PCIe controller support on HiSilicon 94 Say Y here if you want PCIe controller support on HiSilicon
94 Hip05 and Hip06 SoCs 95 Hip05 and Hip06 SoCs
diff --git a/drivers/pci/dwc/pcie-artpec6.c b/drivers/pci/dwc/pcie-artpec6.c
index fcd3ef845883..6d23683c0892 100644
--- a/drivers/pci/dwc/pcie-artpec6.c
+++ b/drivers/pci/dwc/pcie-artpec6.c
@@ -234,6 +234,9 @@ static int artpec6_add_pcie_port(struct artpec6_pcie *artpec6_pcie,
234 return 0; 234 return 0;
235} 235}
236 236
237static const struct dw_pcie_ops dw_pcie_ops = {
238};
239
237static int artpec6_pcie_probe(struct platform_device *pdev) 240static int artpec6_pcie_probe(struct platform_device *pdev)
238{ 241{
239 struct device *dev = &pdev->dev; 242 struct device *dev = &pdev->dev;
@@ -252,6 +255,7 @@ static int artpec6_pcie_probe(struct platform_device *pdev)
252 return -ENOMEM; 255 return -ENOMEM;
253 256
254 pci->dev = dev; 257 pci->dev = dev;
258 pci->ops = &dw_pcie_ops;
255 259
256 artpec6_pcie->pci = pci; 260 artpec6_pcie->pci = pci;
257 261
diff --git a/drivers/pci/dwc/pcie-designware-plat.c b/drivers/pci/dwc/pcie-designware-plat.c
index b6c832ba39dd..f20d494922ab 100644
--- a/drivers/pci/dwc/pcie-designware-plat.c
+++ b/drivers/pci/dwc/pcie-designware-plat.c
@@ -86,6 +86,9 @@ static int dw_plat_add_pcie_port(struct pcie_port *pp,
86 return 0; 86 return 0;
87} 87}
88 88
89static const struct dw_pcie_ops dw_pcie_ops = {
90};
91
89static int dw_plat_pcie_probe(struct platform_device *pdev) 92static int dw_plat_pcie_probe(struct platform_device *pdev)
90{ 93{
91 struct device *dev = &pdev->dev; 94 struct device *dev = &pdev->dev;
@@ -103,6 +106,7 @@ static int dw_plat_pcie_probe(struct platform_device *pdev)
103 return -ENOMEM; 106 return -ENOMEM;
104 107
105 pci->dev = dev; 108 pci->dev = dev;
109 pci->ops = &dw_pcie_ops;
106 110
107 dw_plat_pcie->pci = pci; 111 dw_plat_pcie->pci = pci;
108 112
diff --git a/drivers/pci/host/pci-thunder-pem.c b/drivers/pci/host/pci-thunder-pem.c
index 52b5bdccf5f0..6e031b522529 100644
--- a/drivers/pci/host/pci-thunder-pem.c
+++ b/drivers/pci/host/pci-thunder-pem.c
@@ -14,6 +14,7 @@
14 * Copyright (C) 2015 - 2016 Cavium, Inc. 14 * Copyright (C) 2015 - 2016 Cavium, Inc.
15 */ 15 */
16 16
17#include <linux/bitfield.h>
17#include <linux/kernel.h> 18#include <linux/kernel.h>
18#include <linux/init.h> 19#include <linux/init.h>
19#include <linux/of_address.h> 20#include <linux/of_address.h>
@@ -334,6 +335,49 @@ static int thunder_pem_init(struct device *dev, struct pci_config_window *cfg,
334 335
335#if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS) 336#if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS)
336 337
338#define PEM_RES_BASE 0x87e0c0000000UL
339#define PEM_NODE_MASK GENMASK(45, 44)
340#define PEM_INDX_MASK GENMASK(26, 24)
341#define PEM_MIN_DOM_IN_NODE 4
342#define PEM_MAX_DOM_IN_NODE 10
343
344static void thunder_pem_reserve_range(struct device *dev, int seg,
345 struct resource *r)
346{
347 resource_size_t start = r->start, end = r->end;
348 struct resource *res;
349 const char *regionid;
350
351 regionid = kasprintf(GFP_KERNEL, "PEM RC:%d", seg);
352 if (!regionid)
353 return;
354
355 res = request_mem_region(start, end - start + 1, regionid);
356 if (res)
357 res->flags &= ~IORESOURCE_BUSY;
358 else
359 kfree(regionid);
360
361 dev_info(dev, "%pR %s reserved\n", r,
362 res ? "has been" : "could not be");
363}
364
365static void thunder_pem_legacy_fw(struct acpi_pci_root *root,
366 struct resource *res_pem)
367{
368 int node = acpi_get_node(root->device->handle);
369 int index;
370
371 if (node == NUMA_NO_NODE)
372 node = 0;
373
374 index = root->segment - PEM_MIN_DOM_IN_NODE;
375 index -= node * PEM_MAX_DOM_IN_NODE;
376 res_pem->start = PEM_RES_BASE | FIELD_PREP(PEM_NODE_MASK, node) |
377 FIELD_PREP(PEM_INDX_MASK, index);
378 res_pem->flags = IORESOURCE_MEM;
379}
380
337static int thunder_pem_acpi_init(struct pci_config_window *cfg) 381static int thunder_pem_acpi_init(struct pci_config_window *cfg)
338{ 382{
339 struct device *dev = cfg->parent; 383 struct device *dev = cfg->parent;
@@ -346,10 +390,24 @@ static int thunder_pem_acpi_init(struct pci_config_window *cfg)
346 if (!res_pem) 390 if (!res_pem)
347 return -ENOMEM; 391 return -ENOMEM;
348 392
349 ret = acpi_get_rc_resources(dev, "THRX0002", root->segment, res_pem); 393 ret = acpi_get_rc_resources(dev, "CAVA02B", root->segment, res_pem);
394
395 /*
396 * If we fail to gather resources it means that we run with old
397 * FW where we need to calculate PEM-specific resources manually.
398 */
350 if (ret) { 399 if (ret) {
351 dev_err(dev, "can't get rc base address\n"); 400 thunder_pem_legacy_fw(root, res_pem);
352 return ret; 401 /*
402 * Reserve 64K size PEM specific resources. The full 16M range
403 * size is required for thunder_pem_init() call.
404 */
405 res_pem->end = res_pem->start + SZ_64K - 1;
406 thunder_pem_reserve_range(dev, root->segment, res_pem);
407 res_pem->end = res_pem->start + SZ_16M - 1;
408
409 /* Reserve PCI configuration space as well. */
410 thunder_pem_reserve_range(dev, root->segment, &cfg->res);
353 } 411 }
354 412
355 return thunder_pem_init(dev, cfg, res_pem); 413 return thunder_pem_init(dev, cfg, res_pem);
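
thunder_pem_legacy_fw() above synthesizes the PEM base address by OR-ing the NUMA node and controller index into fixed bit ranges with GENMASK()/FIELD_PREP() from the newly included linux/bitfield.h. A standalone sketch with userspace re-implementations of those macros (GENMASK_ULL/FIELD_PREP_ULL here are demo versions, not the kernel headers):

#include <stdio.h>
#include <stdint.h>

/* Demo equivalents of the kernel's GENMASK_ULL()/FIELD_PREP(). */
#define GENMASK_ULL(h, l) \
    (((~0ULL) << (l)) & (~0ULL >> (63 - (h))))
#define FIELD_PREP_ULL(mask, val) \
    (((uint64_t)(val) << __builtin_ctzll(mask)) & (mask))

#define PEM_RES_BASE  0x87e0c0000000ULL
#define PEM_NODE_MASK GENMASK_ULL(45, 44)
#define PEM_INDX_MASK GENMASK_ULL(26, 24)

int main(void)
{
    int node = 1, index = 2;
    uint64_t base = PEM_RES_BASE |
                    FIELD_PREP_ULL(PEM_NODE_MASK, node) |
                    FIELD_PREP_ULL(PEM_INDX_MASK, index);

    printf("PEM base for node %d index %d: %#llx\n",
           node, index, (unsigned long long)base);
    return 0;
}
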
diff --git a/drivers/pci/host/pcie-iproc-bcma.c b/drivers/pci/host/pcie-iproc-bcma.c
index bd4c9ec25edc..384c27e664fe 100644
--- a/drivers/pci/host/pcie-iproc-bcma.c
+++ b/drivers/pci/host/pcie-iproc-bcma.c
@@ -44,8 +44,7 @@ static int iproc_pcie_bcma_probe(struct bcma_device *bdev)
44{ 44{
45 struct device *dev = &bdev->dev; 45 struct device *dev = &bdev->dev;
46 struct iproc_pcie *pcie; 46 struct iproc_pcie *pcie;
47 LIST_HEAD(res); 47 LIST_HEAD(resources);
48 struct resource res_mem;
49 int ret; 48 int ret;
50 49
51 pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL); 50 pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
@@ -63,22 +62,23 @@ static int iproc_pcie_bcma_probe(struct bcma_device *bdev)
63 62
64 pcie->base_addr = bdev->addr; 63 pcie->base_addr = bdev->addr;
65 64
66 res_mem.start = bdev->addr_s[0]; 65 pcie->mem.start = bdev->addr_s[0];
67 res_mem.end = bdev->addr_s[0] + SZ_128M - 1; 66 pcie->mem.end = bdev->addr_s[0] + SZ_128M - 1;
68 res_mem.name = "PCIe MEM space"; 67 pcie->mem.name = "PCIe MEM space";
69 res_mem.flags = IORESOURCE_MEM; 68 pcie->mem.flags = IORESOURCE_MEM;
70 pci_add_resource(&res, &res_mem); 69 pci_add_resource(&resources, &pcie->mem);
71 70
72 pcie->map_irq = iproc_pcie_bcma_map_irq; 71 pcie->map_irq = iproc_pcie_bcma_map_irq;
73 72
74 ret = iproc_pcie_setup(pcie, &res); 73 ret = iproc_pcie_setup(pcie, &resources);
75 if (ret) 74 if (ret) {
76 dev_err(dev, "PCIe controller setup failed\n"); 75 dev_err(dev, "PCIe controller setup failed\n");
77 76 pci_free_resource_list(&resources);
78 pci_free_resource_list(&res); 77 return ret;
78 }
79 79
80 bcma_set_drvdata(bdev, pcie); 80 bcma_set_drvdata(bdev, pcie);
81 return ret; 81 return 0;
82} 82}
83 83
84static void iproc_pcie_bcma_remove(struct bcma_device *bdev) 84static void iproc_pcie_bcma_remove(struct bcma_device *bdev)
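
The iproc-bcma fix moves the memory resource from a probe-local variable into struct iproc_pcie: pci_add_resource() stores a pointer to the resource, so stack storage would dangle once probe returned. A minimal illustration of the lifetime rule (the struct layout and add_resource() are invented for the demo):

#include <stdio.h>
#include <stdlib.h>

struct resource { unsigned long start, end; const char *name; };
struct list_entry { struct resource *res; };

struct pcie {
    struct resource mem;   /* lives as long as the device, like the
                            * new 'mem' member in struct iproc_pcie */
};

/* The resource list stores a *pointer*; if the resource were a
 * probe-function local (the old code), the pointer would dangle as
 * soon as probe returned. */
static void add_resource(struct list_entry *e, struct resource *r)
{
    e->res = r;
}

int main(void)
{
    struct pcie *pcie = calloc(1, sizeof(*pcie));
    struct list_entry entry;

    pcie->mem.start = 0x40000000;
    pcie->mem.end = 0x47ffffff;
    pcie->mem.name = "PCIe MEM space";
    add_resource(&entry, &pcie->mem);  /* safe: heap lifetime */

    printf("%s: %#lx-%#lx\n", entry.res->name,
           entry.res->start, entry.res->end);
    free(pcie);
    return 0;
}
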
diff --git a/drivers/pci/host/pcie-iproc-platform.c b/drivers/pci/host/pcie-iproc-platform.c
index f4909bb0b2ad..8c6a327ca6cd 100644
--- a/drivers/pci/host/pcie-iproc-platform.c
+++ b/drivers/pci/host/pcie-iproc-platform.c
@@ -51,7 +51,7 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev)
51 struct device_node *np = dev->of_node; 51 struct device_node *np = dev->of_node;
52 struct resource reg; 52 struct resource reg;
53 resource_size_t iobase = 0; 53 resource_size_t iobase = 0;
54 LIST_HEAD(res); 54 LIST_HEAD(resources);
55 int ret; 55 int ret;
56 56
57 pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL); 57 pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
@@ -96,10 +96,10 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev)
96 pcie->phy = NULL; 96 pcie->phy = NULL;
97 } 97 }
98 98
99 ret = of_pci_get_host_bridge_resources(np, 0, 0xff, &res, &iobase); 99 ret = of_pci_get_host_bridge_resources(np, 0, 0xff, &resources,
100 &iobase);
100 if (ret) { 101 if (ret) {
101 dev_err(dev, 102 dev_err(dev, "unable to get PCI host bridge resources\n");
102 "unable to get PCI host bridge resources\n");
103 return ret; 103 return ret;
104 } 104 }
105 105
@@ -112,14 +112,15 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev)
112 pcie->map_irq = of_irq_parse_and_map_pci; 112 pcie->map_irq = of_irq_parse_and_map_pci;
113 } 113 }
114 114
115 ret = iproc_pcie_setup(pcie, &res); 115 ret = iproc_pcie_setup(pcie, &resources);
116 if (ret) 116 if (ret) {
117 dev_err(dev, "PCIe controller setup failed\n"); 117 dev_err(dev, "PCIe controller setup failed\n");
118 118 pci_free_resource_list(&resources);
119 pci_free_resource_list(&res); 119 return ret;
120 }
120 121
121 platform_set_drvdata(pdev, pcie); 122 platform_set_drvdata(pdev, pcie);
122 return ret; 123 return 0;
123} 124}
124 125
125static int iproc_pcie_pltfm_remove(struct platform_device *pdev) 126static int iproc_pcie_pltfm_remove(struct platform_device *pdev)
diff --git a/drivers/pci/host/pcie-iproc.h b/drivers/pci/host/pcie-iproc.h
index 04fed8e907f1..0bbe2ea44f3e 100644
--- a/drivers/pci/host/pcie-iproc.h
+++ b/drivers/pci/host/pcie-iproc.h
@@ -90,6 +90,7 @@ struct iproc_pcie {
90#ifdef CONFIG_ARM 90#ifdef CONFIG_ARM
91 struct pci_sys_data sysdata; 91 struct pci_sys_data sysdata;
92#endif 92#endif
93 struct resource mem;
93 struct pci_bus *root_bus; 94 struct pci_bus *root_bus;
94 struct phy *phy; 95 struct phy *phy;
95 int (*map_irq)(const struct pci_dev *, u8, u8); 96 int (*map_irq)(const struct pci_dev *, u8, u8);
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index d69046537b75..32822b0d9cd0 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -2010,29 +2010,57 @@ out_err:
2010 return ERR_PTR(ret); 2010 return ERR_PTR(ret);
2011} 2011}
2012 2012
2013static int pinctrl_create_and_start(struct pinctrl_dev *pctldev) 2013static int pinctrl_claim_hogs(struct pinctrl_dev *pctldev)
2014{ 2014{
2015 pctldev->p = create_pinctrl(pctldev->dev, pctldev); 2015 pctldev->p = create_pinctrl(pctldev->dev, pctldev);
2016 if (!IS_ERR(pctldev->p)) { 2016 if (PTR_ERR(pctldev->p) == -ENODEV) {
2017 kref_get(&pctldev->p->users); 2017 dev_dbg(pctldev->dev, "no hogs found\n");
2018 pctldev->hog_default =
2019 pinctrl_lookup_state(pctldev->p, PINCTRL_STATE_DEFAULT);
2020 if (IS_ERR(pctldev->hog_default)) {
2021 dev_dbg(pctldev->dev,
2022 "failed to lookup the default state\n");
2023 } else {
2024 if (pinctrl_select_state(pctldev->p,
2025 pctldev->hog_default))
2026 dev_err(pctldev->dev,
2027 "failed to select default state\n");
2028 }
2029 2018
2030 pctldev->hog_sleep = 2019 return 0;
2031 pinctrl_lookup_state(pctldev->p, 2020 }
2032 PINCTRL_STATE_SLEEP); 2021
2033 if (IS_ERR(pctldev->hog_sleep)) 2022 if (IS_ERR(pctldev->p)) {
2034 dev_dbg(pctldev->dev, 2023 dev_err(pctldev->dev, "error claiming hogs: %li\n",
2035 "failed to lookup the sleep state\n"); 2024 PTR_ERR(pctldev->p));
2025
2026 return PTR_ERR(pctldev->p);
2027 }
2028
2029 kref_get(&pctldev->p->users);
2030 pctldev->hog_default =
2031 pinctrl_lookup_state(pctldev->p, PINCTRL_STATE_DEFAULT);
2032 if (IS_ERR(pctldev->hog_default)) {
2033 dev_dbg(pctldev->dev,
2034 "failed to lookup the default state\n");
2035 } else {
2036 if (pinctrl_select_state(pctldev->p,
2037 pctldev->hog_default))
2038 dev_err(pctldev->dev,
2039 "failed to select default state\n");
2040 }
2041
2042 pctldev->hog_sleep =
2043 pinctrl_lookup_state(pctldev->p,
2044 PINCTRL_STATE_SLEEP);
2045 if (IS_ERR(pctldev->hog_sleep))
2046 dev_dbg(pctldev->dev,
2047 "failed to lookup the sleep state\n");
2048
2049 return 0;
2050}
2051
2052int pinctrl_enable(struct pinctrl_dev *pctldev)
2053{
2054 int error;
2055
2056 error = pinctrl_claim_hogs(pctldev);
2057 if (error) {
2058 dev_err(pctldev->dev, "could not claim hogs: %i\n",
2059 error);
2060 mutex_destroy(&pctldev->mutex);
2061 kfree(pctldev);
2062
2063 return error;
2036 } 2064 }
2037 2065
2038 mutex_lock(&pinctrldev_list_mutex); 2066 mutex_lock(&pinctrldev_list_mutex);
@@ -2043,6 +2071,7 @@ static int pinctrl_create_and_start(struct pinctrl_dev *pctldev)
2043 2071
2044 return 0; 2072 return 0;
2045} 2073}
2074EXPORT_SYMBOL_GPL(pinctrl_enable);
2046 2075
2047/** 2076/**
2048 * pinctrl_register() - register a pin controller device 2077 * pinctrl_register() - register a pin controller device
@@ -2065,25 +2094,30 @@ struct pinctrl_dev *pinctrl_register(struct pinctrl_desc *pctldesc,
2065 if (IS_ERR(pctldev)) 2094 if (IS_ERR(pctldev))
2066 return pctldev; 2095 return pctldev;
2067 2096
2068 error = pinctrl_create_and_start(pctldev); 2097 error = pinctrl_enable(pctldev);
2069 if (error) { 2098 if (error)
2070 mutex_destroy(&pctldev->mutex);
2071 kfree(pctldev);
2072
2073 return ERR_PTR(error); 2099 return ERR_PTR(error);
2074 }
2075 2100
2076 return pctldev; 2101 return pctldev;
2077 2102
2078} 2103}
2079EXPORT_SYMBOL_GPL(pinctrl_register); 2104EXPORT_SYMBOL_GPL(pinctrl_register);
2080 2105
2106/**
2107 * pinctrl_register_and_init() - register and init pin controller device
2108 * @pctldesc: descriptor for this pin controller
2109 * @dev: parent device for this pin controller
2110 * @driver_data: private pin controller data for this pin controller
2111 * @pctldev: pin controller device
2112 *
2113 * Note that pinctrl_enable() still needs to be manually called after
2114 * this once the driver is ready.
2115 */
2081int pinctrl_register_and_init(struct pinctrl_desc *pctldesc, 2116int pinctrl_register_and_init(struct pinctrl_desc *pctldesc,
2082 struct device *dev, void *driver_data, 2117 struct device *dev, void *driver_data,
2083 struct pinctrl_dev **pctldev) 2118 struct pinctrl_dev **pctldev)
2084{ 2119{
2085 struct pinctrl_dev *p; 2120 struct pinctrl_dev *p;
2086 int error;
2087 2121
2088 p = pinctrl_init_controller(pctldesc, dev, driver_data); 2122 p = pinctrl_init_controller(pctldesc, dev, driver_data);
2089 if (IS_ERR(p)) 2123 if (IS_ERR(p))
@@ -2097,15 +2131,6 @@ int pinctrl_register_and_init(struct pinctrl_desc *pctldesc,
2097 */ 2131 */
2098 *pctldev = p; 2132 *pctldev = p;
2099 2133
2100 error = pinctrl_create_and_start(p);
2101 if (error) {
2102 mutex_destroy(&p->mutex);
2103 kfree(p);
2104 *pctldev = NULL;
2105
2106 return error;
2107 }
2108
2109 return 0; 2134 return 0;
2110} 2135}
2111EXPORT_SYMBOL_GPL(pinctrl_register_and_init); 2136EXPORT_SYMBOL_GPL(pinctrl_register_and_init);
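
The pinctrl rework splits registration from enabling: pinctrl_register_and_init() only builds state, and pinctrl_enable(), which claims hog pins and can call back into the driver, runs after the driver has finished its own probe (as the imx, pinctrl-single, and other drivers below now do). A toy sketch of that two-phase contract (struct pctldev and the helpers here are stand-ins, not the subsystem's):

#include <stdio.h>
#include <stdbool.h>

struct pctldev { bool ops_ready; };

/* Phase 1: registration only builds state; callbacks into the
 * driver are not safe yet. */
static int register_and_init(struct pctldev *p)
{
    p->ops_ready = false;
    return 0;
}

/* Phase 2: enabling may call back into the driver (claiming hog
 * pins), so it must run only once the driver is fully set up. */
static int enable(struct pctldev *p)
{
    if (!p->ops_ready) {
        fprintf(stderr, "enable called before driver was ready\n");
        return -1;
    }
    puts("hogs claimed, controller live");
    return 0;
}

int main(void)
{
    struct pctldev p;

    register_and_init(&p);
    /* ... driver finishes probing its hardware here ... */
    p.ops_ready = true;
    return enable(&p);
}
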
diff --git a/drivers/pinctrl/freescale/pinctrl-imx.c b/drivers/pinctrl/freescale/pinctrl-imx.c
index a7ace9e1ad81..74bd90dfd7b1 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx.c
@@ -790,7 +790,7 @@ int imx_pinctrl_probe(struct platform_device *pdev,
790 790
791 dev_info(&pdev->dev, "initialized IMX pinctrl driver\n"); 791 dev_info(&pdev->dev, "initialized IMX pinctrl driver\n");
792 792
793 return 0; 793 return pinctrl_enable(ipctl->pctl);
794 794
795free: 795free:
796 imx_free_resources(ipctl); 796 imx_free_resources(ipctl);
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
index f80134e3e0b6..9ff790174906 100644
--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
+++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
@@ -13,6 +13,7 @@
13 * published by the Free Software Foundation. 13 * published by the Free Software Foundation.
14 */ 14 */
15 15
16#include <linux/dmi.h>
16#include <linux/kernel.h> 17#include <linux/kernel.h>
17#include <linux/module.h> 18#include <linux/module.h>
18#include <linux/init.h> 19#include <linux/init.h>
@@ -1524,10 +1525,31 @@ static void chv_gpio_irq_handler(struct irq_desc *desc)
1524 chained_irq_exit(chip, desc); 1525 chained_irq_exit(chip, desc);
1525} 1526}
1526 1527
1528/*
1529 * Certain machines seem to hardcode Linux IRQ numbers in their ACPI
1530 * tables. Since we leave GPIOs that are not capable of generating
 1531 * interrupts out of the irqdomain, the numbering will be different and
 1532 * cause devices using the hardcoded IRQ numbers to fail. In order not to
1533 * break such machines we will only mask pins from irqdomain if the machine
1534 * is not listed below.
1535 */
1536static const struct dmi_system_id chv_no_valid_mask[] = {
1537 {
1538 /* See https://bugzilla.kernel.org/show_bug.cgi?id=194945 */
1539 .ident = "Acer Chromebook (CYAN)",
1540 .matches = {
1541 DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
1542 DMI_MATCH(DMI_PRODUCT_NAME, "Edgar"),
1543 DMI_MATCH(DMI_BIOS_DATE, "05/21/2016"),
1544 },
1545 }
1546};
1547
1527static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq) 1548static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
1528{ 1549{
1529 const struct chv_gpio_pinrange *range; 1550 const struct chv_gpio_pinrange *range;
1530 struct gpio_chip *chip = &pctrl->chip; 1551 struct gpio_chip *chip = &pctrl->chip;
1552 bool need_valid_mask = !dmi_check_system(chv_no_valid_mask);
1531 int ret, i, offset; 1553 int ret, i, offset;
1532 1554
1533 *chip = chv_gpio_chip; 1555 *chip = chv_gpio_chip;
@@ -1536,7 +1558,7 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
1536 chip->label = dev_name(pctrl->dev); 1558 chip->label = dev_name(pctrl->dev);
1537 chip->parent = pctrl->dev; 1559 chip->parent = pctrl->dev;
1538 chip->base = -1; 1560 chip->base = -1;
1539 chip->irq_need_valid_mask = true; 1561 chip->irq_need_valid_mask = need_valid_mask;
1540 1562
1541 ret = devm_gpiochip_add_data(pctrl->dev, chip, pctrl); 1563 ret = devm_gpiochip_add_data(pctrl->dev, chip, pctrl);
1542 if (ret) { 1564 if (ret) {
@@ -1567,7 +1589,7 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
1567 intsel &= CHV_PADCTRL0_INTSEL_MASK; 1589 intsel &= CHV_PADCTRL0_INTSEL_MASK;
1568 intsel >>= CHV_PADCTRL0_INTSEL_SHIFT; 1590 intsel >>= CHV_PADCTRL0_INTSEL_SHIFT;
1569 1591
1570 if (intsel >= pctrl->community->nirqs) 1592 if (need_valid_mask && intsel >= pctrl->community->nirqs)
1571 clear_bit(i, chip->irq_valid_mask); 1593 clear_bit(i, chip->irq_valid_mask);
1572 } 1594 }
1573 1595
diff --git a/drivers/pinctrl/meson/pinctrl-meson-gxbb.c b/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
index 7671424d46cb..31a3a98d067c 100644
--- a/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
+++ b/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
@@ -667,11 +667,11 @@ static const char * const uart_ao_b_groups[] = {
667}; 667};
668 668
669static const char * const i2c_ao_groups[] = { 669static const char * const i2c_ao_groups[] = {
670 "i2c_sdk_ao", "i2c_sda_ao", 670 "i2c_sck_ao", "i2c_sda_ao",
671}; 671};
672 672
673static const char * const i2c_slave_ao_groups[] = { 673static const char * const i2c_slave_ao_groups[] = {
674 "i2c_slave_sdk_ao", "i2c_slave_sda_ao", 674 "i2c_slave_sck_ao", "i2c_slave_sda_ao",
675}; 675};
676 676
677static const char * const remote_input_ao_groups[] = { 677static const char * const remote_input_ao_groups[] = {
diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
index 8b2d45e85bae..9c267dcda094 100644
--- a/drivers/pinctrl/pinctrl-single.c
+++ b/drivers/pinctrl/pinctrl-single.c
@@ -1781,7 +1781,7 @@ static int pcs_probe(struct platform_device *pdev)
1781 dev_info(pcs->dev, "%i pins at pa %p size %u\n", 1781 dev_info(pcs->dev, "%i pins at pa %p size %u\n",
1782 pcs->desc.npins, pcs->base, pcs->size); 1782 pcs->desc.npins, pcs->base, pcs->size);
1783 1783
1784 return 0; 1784 return pinctrl_enable(pcs->pctl);
1785 1785
1786free: 1786free:
1787 pcs_free_resources(pcs); 1787 pcs_free_resources(pcs);
diff --git a/drivers/pinctrl/pinctrl-st.c b/drivers/pinctrl/pinctrl-st.c
index 676efcc032d2..3ae8066bc127 100644
--- a/drivers/pinctrl/pinctrl-st.c
+++ b/drivers/pinctrl/pinctrl-st.c
@@ -1285,6 +1285,22 @@ static void st_gpio_irq_unmask(struct irq_data *d)
1285 writel(BIT(d->hwirq), bank->base + REG_PIO_SET_PMASK); 1285 writel(BIT(d->hwirq), bank->base + REG_PIO_SET_PMASK);
1286} 1286}
1287 1287
1288static int st_gpio_irq_request_resources(struct irq_data *d)
1289{
1290 struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
1291
1292 st_gpio_direction_input(gc, d->hwirq);
1293
1294 return gpiochip_lock_as_irq(gc, d->hwirq);
1295}
1296
1297static void st_gpio_irq_release_resources(struct irq_data *d)
1298{
1299 struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
1300
1301 gpiochip_unlock_as_irq(gc, d->hwirq);
1302}
1303
1288static int st_gpio_irq_set_type(struct irq_data *d, unsigned type) 1304static int st_gpio_irq_set_type(struct irq_data *d, unsigned type)
1289{ 1305{
1290 struct gpio_chip *gc = irq_data_get_irq_chip_data(d); 1306 struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
@@ -1438,12 +1454,14 @@ static struct gpio_chip st_gpio_template = {
1438}; 1454};
1439 1455
1440static struct irq_chip st_gpio_irqchip = { 1456static struct irq_chip st_gpio_irqchip = {
1441 .name = "GPIO", 1457 .name = "GPIO",
1442 .irq_disable = st_gpio_irq_mask, 1458 .irq_request_resources = st_gpio_irq_request_resources,
1443 .irq_mask = st_gpio_irq_mask, 1459 .irq_release_resources = st_gpio_irq_release_resources,
1444 .irq_unmask = st_gpio_irq_unmask, 1460 .irq_disable = st_gpio_irq_mask,
1445 .irq_set_type = st_gpio_irq_set_type, 1461 .irq_mask = st_gpio_irq_mask,
1446 .flags = IRQCHIP_SKIP_SET_WAKE, 1462 .irq_unmask = st_gpio_irq_unmask,
1463 .irq_set_type = st_gpio_irq_set_type,
1464 .flags = IRQCHIP_SKIP_SET_WAKE,
1447}; 1465};
1448 1466
1449static int st_gpiolib_register_bank(struct st_pinctrl *info, 1467static int st_gpiolib_register_bank(struct st_pinctrl *info,
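
pinctrl-st gains irq_request_resources()/irq_release_resources() callbacks so a
pin claimed as an interrupt is first switched to input and then locked against
concurrent GPIO use. The generic shape of that pattern (names here are
illustrative, not from the driver):

    #include <linux/gpio/driver.h>
    #include <linux/irq.h>

    static int example_irq_request_resources(struct irq_data *d)
    {
            struct gpio_chip *gc = irq_data_get_irq_chip_data(d);

            /* The pad must be an input before it can latch events... */
            gc->direction_input(gc, d->hwirq);

            /* ...and gpiolib consumers must not flip it back meanwhile. */
            return gpiochip_lock_as_irq(gc, d->hwirq);
    }

    static void example_irq_release_resources(struct irq_data *d)
    {
            struct gpio_chip *gc = irq_data_get_irq_chip_data(d);

            gpiochip_unlock_as_irq(gc, d->hwirq);
    }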
diff --git a/drivers/pinctrl/qcom/pinctrl-ipq4019.c b/drivers/pinctrl/qcom/pinctrl-ipq4019.c
index b68ae424cee2..743d1f458205 100644
--- a/drivers/pinctrl/qcom/pinctrl-ipq4019.c
+++ b/drivers/pinctrl/qcom/pinctrl-ipq4019.c
@@ -405,6 +405,36 @@ static const struct msm_pingroup ipq4019_groups[] = {
405 PINGROUP(67, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), 405 PINGROUP(67, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
406 PINGROUP(68, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), 406 PINGROUP(68, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
407 PINGROUP(69, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), 407 PINGROUP(69, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
408 PINGROUP(70, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
409 PINGROUP(71, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
410 PINGROUP(72, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
411 PINGROUP(73, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
412 PINGROUP(74, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
413 PINGROUP(75, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
414 PINGROUP(76, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
415 PINGROUP(77, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
416 PINGROUP(78, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
417 PINGROUP(79, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
418 PINGROUP(80, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
419 PINGROUP(81, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
420 PINGROUP(82, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
421 PINGROUP(83, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
422 PINGROUP(84, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
423 PINGROUP(85, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
424 PINGROUP(86, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
425 PINGROUP(87, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
426 PINGROUP(88, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
427 PINGROUP(89, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
428 PINGROUP(90, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
429 PINGROUP(91, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
430 PINGROUP(92, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
431 PINGROUP(93, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
432 PINGROUP(94, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
433 PINGROUP(95, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
434 PINGROUP(96, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
435 PINGROUP(97, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
436 PINGROUP(98, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
437 PINGROUP(99, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
408}; 438};
409 439
410static const struct msm_pinctrl_soc_data ipq4019_pinctrl = { 440static const struct msm_pinctrl_soc_data ipq4019_pinctrl = {
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
index c978be5eb9eb..273badd92561 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
@@ -609,10 +609,6 @@ static void msm_gpio_irq_unmask(struct irq_data *d)
609 609
610 raw_spin_lock_irqsave(&pctrl->lock, flags); 610 raw_spin_lock_irqsave(&pctrl->lock, flags);
611 611
612 val = readl(pctrl->regs + g->intr_status_reg);
613 val &= ~BIT(g->intr_status_bit);
614 writel(val, pctrl->regs + g->intr_status_reg);
615
616 val = readl(pctrl->regs + g->intr_cfg_reg); 612 val = readl(pctrl->regs + g->intr_cfg_reg);
617 val |= BIT(g->intr_enable_bit); 613 val |= BIT(g->intr_enable_bit);
618 writel(val, pctrl->regs + g->intr_cfg_reg); 614 writel(val, pctrl->regs + g->intr_cfg_reg);
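
The qcom hunk deletes the status-register write from the unmask path: acking
pending state while unmasking can silently drop an edge that arrived while the
line was masked. Unmask reduces to re-enabling delivery, roughly as below
(example_pinctrl and example_pingroup are stand-ins for the driver's controller
and pin-group state):

    static void example_gpio_irq_unmask(struct example_pinctrl *pctrl,
                                        const struct example_pingroup *g)
    {
            unsigned long flags;
            u32 val;

            raw_spin_lock_irqsave(&pctrl->lock, flags);

            /* Re-enable only; leaving the status register untouched means
             * an edge latched while masked is still delivered. */
            val = readl(pctrl->regs + g->intr_cfg_reg);
            val |= BIT(g->intr_enable_bit);
            writel(val, pctrl->regs + g->intr_cfg_reg);

            raw_spin_unlock_irqrestore(&pctrl->lock, flags);
    }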
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.c b/drivers/pinctrl/samsung/pinctrl-exynos.c
index f9b49967f512..63e51b56a22a 100644
--- a/drivers/pinctrl/samsung/pinctrl-exynos.c
+++ b/drivers/pinctrl/samsung/pinctrl-exynos.c
@@ -1468,82 +1468,82 @@ const struct samsung_pin_ctrl exynos5420_pin_ctrl[] __initconst = {
1468 1468
1469/* pin banks of exynos5433 pin-controller - ALIVE */ 1469/* pin banks of exynos5433 pin-controller - ALIVE */
1470static const struct samsung_pin_bank_data exynos5433_pin_banks0[] __initconst = { 1470static const struct samsung_pin_bank_data exynos5433_pin_banks0[] __initconst = {
1471 EXYNOS_PIN_BANK_EINTW(8, 0x000, "gpa0", 0x00), 1471 EXYNOS5433_PIN_BANK_EINTW(8, 0x000, "gpa0", 0x00),
1472 EXYNOS_PIN_BANK_EINTW(8, 0x020, "gpa1", 0x04), 1472 EXYNOS5433_PIN_BANK_EINTW(8, 0x020, "gpa1", 0x04),
1473 EXYNOS_PIN_BANK_EINTW(8, 0x040, "gpa2", 0x08), 1473 EXYNOS5433_PIN_BANK_EINTW(8, 0x040, "gpa2", 0x08),
1474 EXYNOS_PIN_BANK_EINTW(8, 0x060, "gpa3", 0x0c), 1474 EXYNOS5433_PIN_BANK_EINTW(8, 0x060, "gpa3", 0x0c),
1475 EXYNOS_PIN_BANK_EINTW_EXT(8, 0x020, "gpf1", 0x1004, 1), 1475 EXYNOS5433_PIN_BANK_EINTW_EXT(8, 0x020, "gpf1", 0x1004, 1),
1476 EXYNOS_PIN_BANK_EINTW_EXT(4, 0x040, "gpf2", 0x1008, 1), 1476 EXYNOS5433_PIN_BANK_EINTW_EXT(4, 0x040, "gpf2", 0x1008, 1),
1477 EXYNOS_PIN_BANK_EINTW_EXT(4, 0x060, "gpf3", 0x100c, 1), 1477 EXYNOS5433_PIN_BANK_EINTW_EXT(4, 0x060, "gpf3", 0x100c, 1),
1478 EXYNOS_PIN_BANK_EINTW_EXT(8, 0x080, "gpf4", 0x1010, 1), 1478 EXYNOS5433_PIN_BANK_EINTW_EXT(8, 0x080, "gpf4", 0x1010, 1),
1479 EXYNOS_PIN_BANK_EINTW_EXT(8, 0x0a0, "gpf5", 0x1014, 1), 1479 EXYNOS5433_PIN_BANK_EINTW_EXT(8, 0x0a0, "gpf5", 0x1014, 1),
1480}; 1480};
1481 1481
1482/* pin banks of exynos5433 pin-controller - AUD */ 1482/* pin banks of exynos5433 pin-controller - AUD */
1483static const struct samsung_pin_bank_data exynos5433_pin_banks1[] __initconst = { 1483static const struct samsung_pin_bank_data exynos5433_pin_banks1[] __initconst = {
1484 EXYNOS_PIN_BANK_EINTG(7, 0x000, "gpz0", 0x00), 1484 EXYNOS5433_PIN_BANK_EINTG(7, 0x000, "gpz0", 0x00),
1485 EXYNOS_PIN_BANK_EINTG(4, 0x020, "gpz1", 0x04), 1485 EXYNOS5433_PIN_BANK_EINTG(4, 0x020, "gpz1", 0x04),
1486}; 1486};
1487 1487
1488/* pin banks of exynos5433 pin-controller - CPIF */ 1488/* pin banks of exynos5433 pin-controller - CPIF */
1489static const struct samsung_pin_bank_data exynos5433_pin_banks2[] __initconst = { 1489static const struct samsung_pin_bank_data exynos5433_pin_banks2[] __initconst = {
1490 EXYNOS_PIN_BANK_EINTG(2, 0x000, "gpv6", 0x00), 1490 EXYNOS5433_PIN_BANK_EINTG(2, 0x000, "gpv6", 0x00),
1491}; 1491};
1492 1492
1493/* pin banks of exynos5433 pin-controller - eSE */ 1493/* pin banks of exynos5433 pin-controller - eSE */
1494static const struct samsung_pin_bank_data exynos5433_pin_banks3[] __initconst = { 1494static const struct samsung_pin_bank_data exynos5433_pin_banks3[] __initconst = {
1495 EXYNOS_PIN_BANK_EINTG(3, 0x000, "gpj2", 0x00), 1495 EXYNOS5433_PIN_BANK_EINTG(3, 0x000, "gpj2", 0x00),
1496}; 1496};
1497 1497
1498/* pin banks of exynos5433 pin-controller - FINGER */ 1498/* pin banks of exynos5433 pin-controller - FINGER */
1499static const struct samsung_pin_bank_data exynos5433_pin_banks4[] __initconst = { 1499static const struct samsung_pin_bank_data exynos5433_pin_banks4[] __initconst = {
1500 EXYNOS_PIN_BANK_EINTG(4, 0x000, "gpd5", 0x00), 1500 EXYNOS5433_PIN_BANK_EINTG(4, 0x000, "gpd5", 0x00),
1501}; 1501};
1502 1502
1503/* pin banks of exynos5433 pin-controller - FSYS */ 1503/* pin banks of exynos5433 pin-controller - FSYS */
1504static const struct samsung_pin_bank_data exynos5433_pin_banks5[] __initconst = { 1504static const struct samsung_pin_bank_data exynos5433_pin_banks5[] __initconst = {
1505 EXYNOS_PIN_BANK_EINTG(6, 0x000, "gph1", 0x00), 1505 EXYNOS5433_PIN_BANK_EINTG(6, 0x000, "gph1", 0x00),
1506 EXYNOS_PIN_BANK_EINTG(7, 0x020, "gpr4", 0x04), 1506 EXYNOS5433_PIN_BANK_EINTG(7, 0x020, "gpr4", 0x04),
1507 EXYNOS_PIN_BANK_EINTG(5, 0x040, "gpr0", 0x08), 1507 EXYNOS5433_PIN_BANK_EINTG(5, 0x040, "gpr0", 0x08),
1508 EXYNOS_PIN_BANK_EINTG(8, 0x060, "gpr1", 0x0c), 1508 EXYNOS5433_PIN_BANK_EINTG(8, 0x060, "gpr1", 0x0c),
1509 EXYNOS_PIN_BANK_EINTG(2, 0x080, "gpr2", 0x10), 1509 EXYNOS5433_PIN_BANK_EINTG(2, 0x080, "gpr2", 0x10),
1510 EXYNOS_PIN_BANK_EINTG(8, 0x0a0, "gpr3", 0x14), 1510 EXYNOS5433_PIN_BANK_EINTG(8, 0x0a0, "gpr3", 0x14),
1511}; 1511};
1512 1512
1513/* pin banks of exynos5433 pin-controller - IMEM */ 1513/* pin banks of exynos5433 pin-controller - IMEM */
1514static const struct samsung_pin_bank_data exynos5433_pin_banks6[] __initconst = { 1514static const struct samsung_pin_bank_data exynos5433_pin_banks6[] __initconst = {
1515 EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpf0", 0x00), 1515 EXYNOS5433_PIN_BANK_EINTG(8, 0x000, "gpf0", 0x00),
1516}; 1516};
1517 1517
1518/* pin banks of exynos5433 pin-controller - NFC */ 1518/* pin banks of exynos5433 pin-controller - NFC */
1519static const struct samsung_pin_bank_data exynos5433_pin_banks7[] __initconst = { 1519static const struct samsung_pin_bank_data exynos5433_pin_banks7[] __initconst = {
1520 EXYNOS_PIN_BANK_EINTG(3, 0x000, "gpj0", 0x00), 1520 EXYNOS5433_PIN_BANK_EINTG(3, 0x000, "gpj0", 0x00),
1521}; 1521};
1522 1522
1523/* pin banks of exynos5433 pin-controller - PERIC */ 1523/* pin banks of exynos5433 pin-controller - PERIC */
1524static const struct samsung_pin_bank_data exynos5433_pin_banks8[] __initconst = { 1524static const struct samsung_pin_bank_data exynos5433_pin_banks8[] __initconst = {
1525 EXYNOS_PIN_BANK_EINTG(6, 0x000, "gpv7", 0x00), 1525 EXYNOS5433_PIN_BANK_EINTG(6, 0x000, "gpv7", 0x00),
1526 EXYNOS_PIN_BANK_EINTG(5, 0x020, "gpb0", 0x04), 1526 EXYNOS5433_PIN_BANK_EINTG(5, 0x020, "gpb0", 0x04),
1527 EXYNOS_PIN_BANK_EINTG(8, 0x040, "gpc0", 0x08), 1527 EXYNOS5433_PIN_BANK_EINTG(8, 0x040, "gpc0", 0x08),
1528 EXYNOS_PIN_BANK_EINTG(2, 0x060, "gpc1", 0x0c), 1528 EXYNOS5433_PIN_BANK_EINTG(2, 0x060, "gpc1", 0x0c),
1529 EXYNOS_PIN_BANK_EINTG(6, 0x080, "gpc2", 0x10), 1529 EXYNOS5433_PIN_BANK_EINTG(6, 0x080, "gpc2", 0x10),
1530 EXYNOS_PIN_BANK_EINTG(8, 0x0a0, "gpc3", 0x14), 1530 EXYNOS5433_PIN_BANK_EINTG(8, 0x0a0, "gpc3", 0x14),
1531 EXYNOS_PIN_BANK_EINTG(2, 0x0c0, "gpg0", 0x18), 1531 EXYNOS5433_PIN_BANK_EINTG(2, 0x0c0, "gpg0", 0x18),
1532 EXYNOS_PIN_BANK_EINTG(4, 0x0e0, "gpd0", 0x1c), 1532 EXYNOS5433_PIN_BANK_EINTG(4, 0x0e0, "gpd0", 0x1c),
1533 EXYNOS_PIN_BANK_EINTG(6, 0x100, "gpd1", 0x20), 1533 EXYNOS5433_PIN_BANK_EINTG(6, 0x100, "gpd1", 0x20),
1534 EXYNOS_PIN_BANK_EINTG(8, 0x120, "gpd2", 0x24), 1534 EXYNOS5433_PIN_BANK_EINTG(8, 0x120, "gpd2", 0x24),
1535 EXYNOS_PIN_BANK_EINTG(5, 0x140, "gpd4", 0x28), 1535 EXYNOS5433_PIN_BANK_EINTG(5, 0x140, "gpd4", 0x28),
1536 EXYNOS_PIN_BANK_EINTG(2, 0x160, "gpd8", 0x2c), 1536 EXYNOS5433_PIN_BANK_EINTG(2, 0x160, "gpd8", 0x2c),
1537 EXYNOS_PIN_BANK_EINTG(7, 0x180, "gpd6", 0x30), 1537 EXYNOS5433_PIN_BANK_EINTG(7, 0x180, "gpd6", 0x30),
1538 EXYNOS_PIN_BANK_EINTG(3, 0x1a0, "gpd7", 0x34), 1538 EXYNOS5433_PIN_BANK_EINTG(3, 0x1a0, "gpd7", 0x34),
1539 EXYNOS_PIN_BANK_EINTG(5, 0x1c0, "gpg1", 0x38), 1539 EXYNOS5433_PIN_BANK_EINTG(5, 0x1c0, "gpg1", 0x38),
1540 EXYNOS_PIN_BANK_EINTG(2, 0x1e0, "gpg2", 0x3c), 1540 EXYNOS5433_PIN_BANK_EINTG(2, 0x1e0, "gpg2", 0x3c),
1541 EXYNOS_PIN_BANK_EINTG(8, 0x200, "gpg3", 0x40), 1541 EXYNOS5433_PIN_BANK_EINTG(8, 0x200, "gpg3", 0x40),
1542}; 1542};
1543 1543
1544/* pin banks of exynos5433 pin-controller - TOUCH */ 1544/* pin banks of exynos5433 pin-controller - TOUCH */
1545static const struct samsung_pin_bank_data exynos5433_pin_banks9[] __initconst = { 1545static const struct samsung_pin_bank_data exynos5433_pin_banks9[] __initconst = {
1546 EXYNOS_PIN_BANK_EINTG(3, 0x000, "gpj1", 0x00), 1546 EXYNOS5433_PIN_BANK_EINTG(3, 0x000, "gpj1", 0x00),
1547}; 1547};
1548 1548
1549/* 1549/*
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.h b/drivers/pinctrl/samsung/pinctrl-exynos.h
index a473092fb8d2..cd046eb7d705 100644
--- a/drivers/pinctrl/samsung/pinctrl-exynos.h
+++ b/drivers/pinctrl/samsung/pinctrl-exynos.h
@@ -79,17 +79,6 @@
79 .name = id \ 79 .name = id \
80 } 80 }
81 81
82#define EXYNOS_PIN_BANK_EINTW_EXT(pins, reg, id, offs, pctl_idx) \
83 { \
84 .type = &bank_type_alive, \
85 .pctl_offset = reg, \
86 .nr_pins = pins, \
87 .eint_type = EINT_TYPE_WKUP, \
88 .eint_offset = offs, \
89 .name = id, \
90 .pctl_res_idx = pctl_idx, \
91 } \
92
93#define EXYNOS5433_PIN_BANK_EINTG(pins, reg, id, offs) \ 82#define EXYNOS5433_PIN_BANK_EINTG(pins, reg, id, offs) \
94 { \ 83 { \
95 .type = &exynos5433_bank_type_off, \ 84 .type = &exynos5433_bank_type_off, \
diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.c b/drivers/pinctrl/samsung/pinctrl-samsung.c
index f9ddba7decc1..d7aa22cff480 100644
--- a/drivers/pinctrl/samsung/pinctrl-samsung.c
+++ b/drivers/pinctrl/samsung/pinctrl-samsung.c
@@ -988,9 +988,16 @@ samsung_pinctrl_get_soc_data(struct samsung_pinctrl_drv_data *d,
988 988
989 for (i = 0; i < ctrl->nr_ext_resources + 1; i++) { 989 for (i = 0; i < ctrl->nr_ext_resources + 1; i++) {
990 res = platform_get_resource(pdev, IORESOURCE_MEM, i); 990 res = platform_get_resource(pdev, IORESOURCE_MEM, i);
991 virt_base[i] = devm_ioremap_resource(&pdev->dev, res); 991 if (!res) {
992 if (IS_ERR(virt_base[i])) 992 dev_err(&pdev->dev, "failed to get mem%d resource\n", i);
993 return ERR_CAST(virt_base[i]); 993 return ERR_PTR(-EINVAL);
994 }
995 virt_base[i] = devm_ioremap(&pdev->dev, res->start,
996 resource_size(res));
997 if (!virt_base[i]) {
998 dev_err(&pdev->dev, "failed to ioremap %pR\n", res);
999 return ERR_PTR(-EIO);
1000 }
994 } 1001 }
995 1002
996 bank = d->pin_banks; 1003 bank = d->pin_banks;
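
The samsung hunk swaps devm_ioremap_resource() for a NULL check plus plain
devm_ioremap(). devm_ioremap_resource() also request_mem_region()s the range;
mapping without claiming it allows register windows to overlap, which appears
to be what the new per-bank pctl_res_idx regions above require (an inference
from the hunks, not from a commit message). The resulting helper shape:

    #include <linux/io.h>
    #include <linux/platform_device.h>

    static void __iomem *example_map(struct platform_device *pdev, int index)
    {
            struct resource *res;

            res = platform_get_resource(pdev, IORESOURCE_MEM, index);
            if (!res)
                    return NULL;    /* caller turns this into -EINVAL */

            /* No request_mem_region(), so overlapping windows are fine. */
            return devm_ioremap(&pdev->dev, res->start, resource_size(res));
    }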
diff --git a/drivers/pinctrl/sh-pfc/pinctrl.c b/drivers/pinctrl/sh-pfc/pinctrl.c
index 08150a321be6..a70157f0acf4 100644
--- a/drivers/pinctrl/sh-pfc/pinctrl.c
+++ b/drivers/pinctrl/sh-pfc/pinctrl.c
@@ -816,6 +816,13 @@ int sh_pfc_register_pinctrl(struct sh_pfc *pfc)
816 pmx->pctl_desc.pins = pmx->pins; 816 pmx->pctl_desc.pins = pmx->pins;
817 pmx->pctl_desc.npins = pfc->info->nr_pins; 817 pmx->pctl_desc.npins = pfc->info->nr_pins;
818 818
819 return devm_pinctrl_register_and_init(pfc->dev, &pmx->pctl_desc, pmx, 819 ret = devm_pinctrl_register_and_init(pfc->dev, &pmx->pctl_desc, pmx,
820 &pmx->pctl); 820 &pmx->pctl);
821 if (ret) {
822 dev_err(pfc->dev, "could not register: %i\n", ret);
823
824 return ret;
825 }
826
827 return pinctrl_enable(pmx->pctl);
821} 828}
diff --git a/drivers/pinctrl/ti/Kconfig b/drivers/pinctrl/ti/Kconfig
index 815a88673d38..542077069391 100644
--- a/drivers/pinctrl/ti/Kconfig
+++ b/drivers/pinctrl/ti/Kconfig
@@ -1,6 +1,6 @@
1config PINCTRL_TI_IODELAY 1config PINCTRL_TI_IODELAY
2 tristate "TI IODelay Module pinconf driver" 2 tristate "TI IODelay Module pinconf driver"
3 depends on OF 3 depends on OF && (SOC_DRA7XX || COMPILE_TEST)
4 select GENERIC_PINCTRL_GROUPS 4 select GENERIC_PINCTRL_GROUPS
5 select GENERIC_PINMUX_FUNCTIONS 5 select GENERIC_PINMUX_FUNCTIONS
6 select GENERIC_PINCONF 6 select GENERIC_PINCONF
diff --git a/drivers/pinctrl/ti/pinctrl-ti-iodelay.c b/drivers/pinctrl/ti/pinctrl-ti-iodelay.c
index 717e3404900c..362c50918c13 100644
--- a/drivers/pinctrl/ti/pinctrl-ti-iodelay.c
+++ b/drivers/pinctrl/ti/pinctrl-ti-iodelay.c
@@ -893,6 +893,8 @@ static int ti_iodelay_probe(struct platform_device *pdev)
893 893
894 platform_set_drvdata(pdev, iod); 894 platform_set_drvdata(pdev, iod);
895 895
896 return pinctrl_enable(iod->pctl);
897
896exit_out: 898exit_out:
897 of_node_put(np); 899 of_node_put(np);
898 return ret; 900 return ret;
diff --git a/drivers/ptp/ptp_kvm.c b/drivers/ptp/ptp_kvm.c
index 09b4df74291e..bb865695d7a6 100644
--- a/drivers/ptp/ptp_kvm.c
+++ b/drivers/ptp/ptp_kvm.c
@@ -193,10 +193,7 @@ static int __init ptp_kvm_init(void)
193 193
194 kvm_ptp_clock.ptp_clock = ptp_clock_register(&kvm_ptp_clock.caps, NULL); 194 kvm_ptp_clock.ptp_clock = ptp_clock_register(&kvm_ptp_clock.caps, NULL);
195 195
196 if (IS_ERR(kvm_ptp_clock.ptp_clock)) 196 return PTR_ERR_OR_ZERO(kvm_ptp_clock.ptp_clock);
197 return PTR_ERR(kvm_ptp_clock.ptp_clock);
198
199 return 0;
200} 197}
201 198
202module_init(ptp_kvm_init); 199module_init(ptp_kvm_init);
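
PTR_ERR_OR_ZERO() collapses the common IS_ERR()/PTR_ERR()/return-0 tail into a
single expression. For reference, the open-coded equivalent it replaces:

    #include <linux/err.h>

    /* return PTR_ERR_OR_ZERO(ptr); is shorthand for: */
    static inline int example_tail(const void *ptr)
    {
            if (IS_ERR(ptr))
                    return PTR_ERR(ptr);
            return 0;
    }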
diff --git a/drivers/pwm/pwm-lpss-pci.c b/drivers/pwm/pwm-lpss-pci.c
index 053088b9b66e..c1527cb645be 100644
--- a/drivers/pwm/pwm-lpss-pci.c
+++ b/drivers/pwm/pwm-lpss-pci.c
@@ -36,6 +36,14 @@ static const struct pwm_lpss_boardinfo pwm_lpss_bxt_info = {
36 .clk_rate = 19200000, 36 .clk_rate = 19200000,
37 .npwm = 4, 37 .npwm = 4,
38 .base_unit_bits = 22, 38 .base_unit_bits = 22,
39 .bypass = true,
40};
41
42/* Tangier */
43static const struct pwm_lpss_boardinfo pwm_lpss_tng_info = {
44 .clk_rate = 19200000,
45 .npwm = 4,
46 .base_unit_bits = 22,
39}; 47};
40 48
41static int pwm_lpss_probe_pci(struct pci_dev *pdev, 49static int pwm_lpss_probe_pci(struct pci_dev *pdev,
@@ -97,7 +105,7 @@ static const struct pci_device_id pwm_lpss_pci_ids[] = {
97 { PCI_VDEVICE(INTEL, 0x0ac8), (unsigned long)&pwm_lpss_bxt_info}, 105 { PCI_VDEVICE(INTEL, 0x0ac8), (unsigned long)&pwm_lpss_bxt_info},
98 { PCI_VDEVICE(INTEL, 0x0f08), (unsigned long)&pwm_lpss_byt_info}, 106 { PCI_VDEVICE(INTEL, 0x0f08), (unsigned long)&pwm_lpss_byt_info},
99 { PCI_VDEVICE(INTEL, 0x0f09), (unsigned long)&pwm_lpss_byt_info}, 107 { PCI_VDEVICE(INTEL, 0x0f09), (unsigned long)&pwm_lpss_byt_info},
100 { PCI_VDEVICE(INTEL, 0x11a5), (unsigned long)&pwm_lpss_bxt_info}, 108 { PCI_VDEVICE(INTEL, 0x11a5), (unsigned long)&pwm_lpss_tng_info},
101 { PCI_VDEVICE(INTEL, 0x1ac8), (unsigned long)&pwm_lpss_bxt_info}, 109 { PCI_VDEVICE(INTEL, 0x1ac8), (unsigned long)&pwm_lpss_bxt_info},
102 { PCI_VDEVICE(INTEL, 0x2288), (unsigned long)&pwm_lpss_bsw_info}, 110 { PCI_VDEVICE(INTEL, 0x2288), (unsigned long)&pwm_lpss_bsw_info},
103 { PCI_VDEVICE(INTEL, 0x2289), (unsigned long)&pwm_lpss_bsw_info}, 111 { PCI_VDEVICE(INTEL, 0x2289), (unsigned long)&pwm_lpss_bsw_info},
diff --git a/drivers/pwm/pwm-lpss-platform.c b/drivers/pwm/pwm-lpss-platform.c
index b22b6fdadb9a..5d6ed1507d29 100644
--- a/drivers/pwm/pwm-lpss-platform.c
+++ b/drivers/pwm/pwm-lpss-platform.c
@@ -37,6 +37,7 @@ static const struct pwm_lpss_boardinfo pwm_lpss_bxt_info = {
37 .clk_rate = 19200000, 37 .clk_rate = 19200000,
38 .npwm = 4, 38 .npwm = 4,
39 .base_unit_bits = 22, 39 .base_unit_bits = 22,
40 .bypass = true,
40}; 41};
41 42
42static int pwm_lpss_probe_platform(struct platform_device *pdev) 43static int pwm_lpss_probe_platform(struct platform_device *pdev)
diff --git a/drivers/pwm/pwm-lpss.c b/drivers/pwm/pwm-lpss.c
index 689d2c1cbead..8db0d40ccacd 100644
--- a/drivers/pwm/pwm-lpss.c
+++ b/drivers/pwm/pwm-lpss.c
@@ -57,7 +57,7 @@ static inline void pwm_lpss_write(const struct pwm_device *pwm, u32 value)
57 writel(value, lpwm->regs + pwm->hwpwm * PWM_SIZE + PWM); 57 writel(value, lpwm->regs + pwm->hwpwm * PWM_SIZE + PWM);
58} 58}
59 59
60static int pwm_lpss_update(struct pwm_device *pwm) 60static int pwm_lpss_wait_for_update(struct pwm_device *pwm)
61{ 61{
62 struct pwm_lpss_chip *lpwm = to_lpwm(pwm->chip); 62 struct pwm_lpss_chip *lpwm = to_lpwm(pwm->chip);
63 const void __iomem *addr = lpwm->regs + pwm->hwpwm * PWM_SIZE + PWM; 63 const void __iomem *addr = lpwm->regs + pwm->hwpwm * PWM_SIZE + PWM;
@@ -65,8 +65,6 @@ static int pwm_lpss_update(struct pwm_device *pwm)
65 u32 val; 65 u32 val;
66 int err; 66 int err;
67 67
68 pwm_lpss_write(pwm, pwm_lpss_read(pwm) | PWM_SW_UPDATE);
69
70 /* 68 /*
71 * PWM Configuration register has SW_UPDATE bit that is set when a new 69 * PWM Configuration register has SW_UPDATE bit that is set when a new
72 * configuration is written to the register. The bit is automatically 70 * configuration is written to the register. The bit is automatically
@@ -122,6 +120,12 @@ static void pwm_lpss_prepare(struct pwm_lpss_chip *lpwm, struct pwm_device *pwm,
122 pwm_lpss_write(pwm, ctrl); 120 pwm_lpss_write(pwm, ctrl);
123} 121}
124 122
123static inline void pwm_lpss_cond_enable(struct pwm_device *pwm, bool cond)
124{
125 if (cond)
126 pwm_lpss_write(pwm, pwm_lpss_read(pwm) | PWM_ENABLE);
127}
128
125static int pwm_lpss_apply(struct pwm_chip *chip, struct pwm_device *pwm, 129static int pwm_lpss_apply(struct pwm_chip *chip, struct pwm_device *pwm,
126 struct pwm_state *state) 130 struct pwm_state *state)
127{ 131{
@@ -137,18 +141,21 @@ static int pwm_lpss_apply(struct pwm_chip *chip, struct pwm_device *pwm,
137 return ret; 141 return ret;
138 } 142 }
139 pwm_lpss_prepare(lpwm, pwm, state->duty_cycle, state->period); 143 pwm_lpss_prepare(lpwm, pwm, state->duty_cycle, state->period);
140 ret = pwm_lpss_update(pwm); 144 pwm_lpss_write(pwm, pwm_lpss_read(pwm) | PWM_SW_UPDATE);
145 pwm_lpss_cond_enable(pwm, lpwm->info->bypass == false);
146 ret = pwm_lpss_wait_for_update(pwm);
141 if (ret) { 147 if (ret) {
142 pm_runtime_put(chip->dev); 148 pm_runtime_put(chip->dev);
143 return ret; 149 return ret;
144 } 150 }
145 pwm_lpss_write(pwm, pwm_lpss_read(pwm) | PWM_ENABLE); 151 pwm_lpss_cond_enable(pwm, lpwm->info->bypass == true);
146 } else { 152 } else {
147 ret = pwm_lpss_is_updating(pwm); 153 ret = pwm_lpss_is_updating(pwm);
148 if (ret) 154 if (ret)
149 return ret; 155 return ret;
150 pwm_lpss_prepare(lpwm, pwm, state->duty_cycle, state->period); 156 pwm_lpss_prepare(lpwm, pwm, state->duty_cycle, state->period);
151 return pwm_lpss_update(pwm); 157 pwm_lpss_write(pwm, pwm_lpss_read(pwm) | PWM_SW_UPDATE);
158 return pwm_lpss_wait_for_update(pwm);
152 } 159 }
153 } else if (pwm_is_enabled(pwm)) { 160 } else if (pwm_is_enabled(pwm)) {
154 pwm_lpss_write(pwm, pwm_lpss_read(pwm) & ~PWM_ENABLE); 161 pwm_lpss_write(pwm, pwm_lpss_read(pwm) & ~PWM_ENABLE);
diff --git a/drivers/pwm/pwm-lpss.h b/drivers/pwm/pwm-lpss.h
index c94cd7c2695d..98306bb02cfe 100644
--- a/drivers/pwm/pwm-lpss.h
+++ b/drivers/pwm/pwm-lpss.h
@@ -22,6 +22,7 @@ struct pwm_lpss_boardinfo {
22 unsigned long clk_rate; 22 unsigned long clk_rate;
23 unsigned int npwm; 23 unsigned int npwm;
24 unsigned long base_unit_bits; 24 unsigned long base_unit_bits;
25 bool bypass;
25}; 26};
26 27
27struct pwm_lpss_chip *pwm_lpss_probe(struct device *dev, struct resource *r, 28struct pwm_lpss_chip *pwm_lpss_probe(struct device *dev, struct resource *r,
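
The pwm-lpss rework separates kicking SW_UPDATE, waiting for it to clear, and
setting PWM_ENABLE, so the ordering can depend on the new per-board bypass
flag: parts without bypass enable before the wait, parts with bypass (Broxton
here) enable only after the update has latched. Condensed from the hunks
above, as a sketch:

    /* Enable ordering keyed off info->bypass. */
    pwm_lpss_write(pwm, pwm_lpss_read(pwm) | PWM_SW_UPDATE);        /* kick */
    if (!lpwm->info->bypass)
            pwm_lpss_write(pwm, pwm_lpss_read(pwm) | PWM_ENABLE);
    ret = pwm_lpss_wait_for_update(pwm);    /* SW_UPDATE self-clears */
    if (ret)
            return ret;
    if (lpwm->info->bypass)
            pwm_lpss_write(pwm, pwm_lpss_read(pwm) | PWM_ENABLE);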
diff --git a/drivers/pwm/pwm-rockchip.c b/drivers/pwm/pwm-rockchip.c
index ef89df1f7336..744d56197286 100644
--- a/drivers/pwm/pwm-rockchip.c
+++ b/drivers/pwm/pwm-rockchip.c
@@ -191,6 +191,28 @@ static int rockchip_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
191 return 0; 191 return 0;
192} 192}
193 193
194static int rockchip_pwm_enable(struct pwm_chip *chip,
195 struct pwm_device *pwm,
196 bool enable,
197 enum pwm_polarity polarity)
198{
199 struct rockchip_pwm_chip *pc = to_rockchip_pwm_chip(chip);
200 int ret;
201
202 if (enable) {
203 ret = clk_enable(pc->clk);
204 if (ret)
205 return ret;
206 }
207
208 pc->data->set_enable(chip, pwm, enable, polarity);
209
210 if (!enable)
211 clk_disable(pc->clk);
212
213 return 0;
214}
215
194static int rockchip_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, 216static int rockchip_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
195 struct pwm_state *state) 217 struct pwm_state *state)
196{ 218{
@@ -207,22 +229,26 @@ static int rockchip_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
207 return ret; 229 return ret;
208 230
209 if (state->polarity != curstate.polarity && enabled) { 231 if (state->polarity != curstate.polarity && enabled) {
210 pc->data->set_enable(chip, pwm, false, state->polarity); 232 ret = rockchip_pwm_enable(chip, pwm, false, state->polarity);
233 if (ret)
234 goto out;
211 enabled = false; 235 enabled = false;
212 } 236 }
213 237
214 ret = rockchip_pwm_config(chip, pwm, state->duty_cycle, state->period); 238 ret = rockchip_pwm_config(chip, pwm, state->duty_cycle, state->period);
215 if (ret) { 239 if (ret) {
216 if (enabled != curstate.enabled) 240 if (enabled != curstate.enabled)
217 pc->data->set_enable(chip, pwm, !enabled, 241 rockchip_pwm_enable(chip, pwm, !enabled,
218 state->polarity); 242 state->polarity);
219
220 goto out; 243 goto out;
221 } 244 }
222 245
223 if (state->enabled != enabled) 246 if (state->enabled != enabled) {
224 pc->data->set_enable(chip, pwm, state->enabled, 247 ret = rockchip_pwm_enable(chip, pwm, state->enabled,
225 state->polarity); 248 state->polarity);
249 if (ret)
250 goto out;
251 }
226 252
227 /* 253 /*
228 * Update the state with the real hardware, which can differ a bit 254 * Update the state with the real hardware, which can differ a bit
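
rockchip_pwm_enable() wraps the raw set_enable() callback so the PWM clock is
running whenever the output is switched on and released when it is switched
off, keeping clk_enable()/clk_disable() counts balanced across every path in
apply(). The bracketing pattern in isolation (example_set_enable() stands for
the chip-specific callback):

    #include <linux/clk.h>

    struct example_chip {
            struct clk *clk;
    };

    /* Chip-specific on/off programming, assumed defined elsewhere. */
    void example_set_enable(struct example_chip *pc, bool enable);

    static int example_hw_switch(struct example_chip *pc, bool enable)
    {
            int ret;

            if (enable) {
                    ret = clk_enable(pc->clk);      /* clock must run first */
                    if (ret)
                            return ret;
            }

            example_set_enable(pc, enable);

            if (!enable)
                    clk_disable(pc->clk);           /* drop our reference */

            return 0;
    }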
diff --git a/drivers/rapidio/devices/tsi721.c b/drivers/rapidio/devices/tsi721.c
index 9d19b9a62011..315a4be8dc1e 100644
--- a/drivers/rapidio/devices/tsi721.c
+++ b/drivers/rapidio/devices/tsi721.c
@@ -37,8 +37,8 @@
37#include "tsi721.h" 37#include "tsi721.h"
38 38
39#ifdef DEBUG 39#ifdef DEBUG
40u32 dbg_level; 40u32 tsi_dbg_level;
41module_param(dbg_level, uint, S_IWUSR | S_IRUGO); 41module_param_named(dbg_level, tsi_dbg_level, uint, S_IWUSR | S_IRUGO);
42MODULE_PARM_DESC(dbg_level, "Debugging output level (default 0 = none)"); 42MODULE_PARM_DESC(dbg_level, "Debugging output level (default 0 = none)");
43#endif 43#endif
44 44
diff --git a/drivers/rapidio/devices/tsi721.h b/drivers/rapidio/devices/tsi721.h
index 5941437cbdd1..957eadc58150 100644
--- a/drivers/rapidio/devices/tsi721.h
+++ b/drivers/rapidio/devices/tsi721.h
@@ -40,11 +40,11 @@ enum {
40}; 40};
41 41
42#ifdef DEBUG 42#ifdef DEBUG
43extern u32 dbg_level; 43extern u32 tsi_dbg_level;
44 44
45#define tsi_debug(level, dev, fmt, arg...) \ 45#define tsi_debug(level, dev, fmt, arg...) \
46 do { \ 46 do { \
47 if (DBG_##level & dbg_level) \ 47 if (DBG_##level & tsi_dbg_level) \
48 dev_dbg(dev, "%s: " fmt "\n", __func__, ##arg); \ 48 dev_dbg(dev, "%s: " fmt "\n", __func__, ##arg); \
49 } while (0) 49 } while (0)
50#else 50#else
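
tsi721 renames its global dbg_level to tsi_dbg_level so a built-in kernel does
not collide with another module's symbol of the same name, while
module_param_named() keeps the user-visible parameter name stable. The idiom:

    #include <linux/module.h>
    #include <linux/moduleparam.h>

    /* The internal symbol carries a driver-specific prefix... */
    static u32 example_dbg_level;

    /* ...but userspace still sees /sys/module/<mod>/parameters/dbg_level. */
    module_param_named(dbg_level, example_dbg_level, uint, 0644);
    MODULE_PARM_DESC(dbg_level, "Debugging output level (default 0 = none)");

(tsi721's variable stays non-static because tsi721.h externs it for the
tsi_debug() macro; the sketch uses static for self-containment.)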
diff --git a/drivers/reset/core.c b/drivers/reset/core.c
index f1e5e65388bb..cd739d2fa160 100644
--- a/drivers/reset/core.c
+++ b/drivers/reset/core.c
@@ -275,7 +275,7 @@ int reset_control_status(struct reset_control *rstc)
275} 275}
276EXPORT_SYMBOL_GPL(reset_control_status); 276EXPORT_SYMBOL_GPL(reset_control_status);
277 277
278static struct reset_control *__reset_control_get( 278static struct reset_control *__reset_control_get_internal(
279 struct reset_controller_dev *rcdev, 279 struct reset_controller_dev *rcdev,
280 unsigned int index, bool shared) 280 unsigned int index, bool shared)
281{ 281{
@@ -308,7 +308,7 @@ static struct reset_control *__reset_control_get(
308 return rstc; 308 return rstc;
309} 309}
310 310
311static void __reset_control_put(struct reset_control *rstc) 311static void __reset_control_put_internal(struct reset_control *rstc)
312{ 312{
313 lockdep_assert_held(&reset_list_mutex); 313 lockdep_assert_held(&reset_list_mutex);
314 314
@@ -377,7 +377,7 @@ struct reset_control *__of_reset_control_get(struct device_node *node,
377 } 377 }
378 378
379 /* reset_list_mutex also protects the rcdev's reset_control list */ 379 /* reset_list_mutex also protects the rcdev's reset_control list */
380 rstc = __reset_control_get(rcdev, rstc_id, shared); 380 rstc = __reset_control_get_internal(rcdev, rstc_id, shared);
381 381
382 mutex_unlock(&reset_list_mutex); 382 mutex_unlock(&reset_list_mutex);
383 383
@@ -385,6 +385,17 @@ struct reset_control *__of_reset_control_get(struct device_node *node,
385} 385}
386EXPORT_SYMBOL_GPL(__of_reset_control_get); 386EXPORT_SYMBOL_GPL(__of_reset_control_get);
387 387
388struct reset_control *__reset_control_get(struct device *dev, const char *id,
389 int index, bool shared, bool optional)
390{
391 if (dev->of_node)
392 return __of_reset_control_get(dev->of_node, id, index, shared,
393 optional);
394
395 return optional ? NULL : ERR_PTR(-EINVAL);
396}
397EXPORT_SYMBOL_GPL(__reset_control_get);
398
388/** 399/**
389 * reset_control_put - free the reset controller 400 * reset_control_put - free the reset controller
390 * @rstc: reset controller 401 * @rstc: reset controller
@@ -396,7 +407,7 @@ void reset_control_put(struct reset_control *rstc)
396 return; 407 return;
397 408
398 mutex_lock(&reset_list_mutex); 409 mutex_lock(&reset_list_mutex);
399 __reset_control_put(rstc); 410 __reset_control_put_internal(rstc);
400 mutex_unlock(&reset_list_mutex); 411 mutex_unlock(&reset_list_mutex);
401} 412}
402EXPORT_SYMBOL_GPL(reset_control_put); 413EXPORT_SYMBOL_GPL(reset_control_put);
@@ -417,8 +428,7 @@ struct reset_control *__devm_reset_control_get(struct device *dev,
417 if (!ptr) 428 if (!ptr)
418 return ERR_PTR(-ENOMEM); 429 return ERR_PTR(-ENOMEM);
419 430
420 rstc = __of_reset_control_get(dev ? dev->of_node : NULL, 431 rstc = __reset_control_get(dev, id, index, shared, optional);
421 id, index, shared, optional);
422 if (!IS_ERR(rstc)) { 432 if (!IS_ERR(rstc)) {
423 *ptr = rstc; 433 *ptr = rstc;
424 devres_add(dev, ptr); 434 devres_add(dev, ptr);
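
The newly exported __reset_control_get() fronts the devres helper: devices with
an of_node take the OF lookup path, anything else gets NULL for optional
requests and -EINVAL otherwise. Consumers can therefore request an optional
reset without checking for an of_node themselves; a consumer-side sketch,
assuming the reset core treats a NULL handle as a no-op (which this series is
arranging):

    #include <linux/err.h>
    #include <linux/reset.h>

    static int example_setup(struct device *dev)
    {
            struct reset_control *rst;

            rst = devm_reset_control_get_optional(dev, NULL);
            if (IS_ERR(rst))
                    return PTR_ERR(rst);    /* real error only */

            /* rst may be NULL here; deassert is then a no-op. */
            return reset_control_deassert(rst);
    }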
diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c
index 40f1136f5568..058db724b5a2 100644
--- a/drivers/s390/crypto/pkey_api.c
+++ b/drivers/s390/crypto/pkey_api.c
@@ -572,6 +572,12 @@ int pkey_sec2protkey(u16 cardnr, u16 domain,
572 rc = -EIO; 572 rc = -EIO;
573 goto out; 573 goto out;
574 } 574 }
575 if (prepcblk->ccp_rscode != 0) {
576 DEBUG_WARN(
577 "pkey_sec2protkey unwrap secure key warning, card response %d/%d\n",
578 (int) prepcblk->ccp_rtcode,
579 (int) prepcblk->ccp_rscode);
580 }
575 581
576 /* process response cprb param block */ 582 /* process response cprb param block */
577 prepcblk->rpl_parmb = ((u8 *) prepcblk) + sizeof(struct CPRBX); 583 prepcblk->rpl_parmb = ((u8 *) prepcblk) + sizeof(struct CPRBX);
@@ -761,9 +767,10 @@ out:
761} 767}
762 768
763/* 769/*
764 * Fetch just the mkvp value via query_crypto_facility from adapter. 770 * Fetch the current and old mkvp values via
771 * query_crypto_facility from adapter.
765 */ 772 */
766static int fetch_mkvp(u16 cardnr, u16 domain, u64 *mkvp) 773static int fetch_mkvp(u16 cardnr, u16 domain, u64 mkvp[2])
767{ 774{
768 int rc, found = 0; 775 int rc, found = 0;
769 size_t rlen, vlen; 776 size_t rlen, vlen;
@@ -779,9 +786,10 @@ static int fetch_mkvp(u16 cardnr, u16 domain, u64 *mkvp)
779 rc = query_crypto_facility(cardnr, domain, "STATICSA", 786 rc = query_crypto_facility(cardnr, domain, "STATICSA",
780 rarray, &rlen, varray, &vlen); 787 rarray, &rlen, varray, &vlen);
781 if (rc == 0 && rlen > 8*8 && vlen > 184+8) { 788 if (rc == 0 && rlen > 8*8 && vlen > 184+8) {
782 if (rarray[64] == '2') { 789 if (rarray[8*8] == '2') {
783 /* current master key state is valid */ 790 /* current master key state is valid */
784 *mkvp = *((u64 *)(varray + 184)); 791 mkvp[0] = *((u64 *)(varray + 184));
792 mkvp[1] = *((u64 *)(varray + 172));
785 found = 1; 793 found = 1;
786 } 794 }
787 } 795 }
@@ -796,14 +804,14 @@ struct mkvp_info {
796 struct list_head list; 804 struct list_head list;
797 u16 cardnr; 805 u16 cardnr;
798 u16 domain; 806 u16 domain;
799 u64 mkvp; 807 u64 mkvp[2];
800}; 808};
801 809
802/* a list with mkvp_info entries */ 810/* a list with mkvp_info entries */
803static LIST_HEAD(mkvp_list); 811static LIST_HEAD(mkvp_list);
804static DEFINE_SPINLOCK(mkvp_list_lock); 812static DEFINE_SPINLOCK(mkvp_list_lock);
805 813
806static int mkvp_cache_fetch(u16 cardnr, u16 domain, u64 *mkvp) 814static int mkvp_cache_fetch(u16 cardnr, u16 domain, u64 mkvp[2])
807{ 815{
808 int rc = -ENOENT; 816 int rc = -ENOENT;
809 struct mkvp_info *ptr; 817 struct mkvp_info *ptr;
@@ -812,7 +820,7 @@ static int mkvp_cache_fetch(u16 cardnr, u16 domain, u64 *mkvp)
812 list_for_each_entry(ptr, &mkvp_list, list) { 820 list_for_each_entry(ptr, &mkvp_list, list) {
813 if (ptr->cardnr == cardnr && 821 if (ptr->cardnr == cardnr &&
814 ptr->domain == domain) { 822 ptr->domain == domain) {
815 *mkvp = ptr->mkvp; 823 memcpy(mkvp, ptr->mkvp, 2 * sizeof(u64));
816 rc = 0; 824 rc = 0;
817 break; 825 break;
818 } 826 }
@@ -822,7 +830,7 @@ static int mkvp_cache_fetch(u16 cardnr, u16 domain, u64 *mkvp)
822 return rc; 830 return rc;
823} 831}
824 832
825static void mkvp_cache_update(u16 cardnr, u16 domain, u64 mkvp) 833static void mkvp_cache_update(u16 cardnr, u16 domain, u64 mkvp[2])
826{ 834{
827 int found = 0; 835 int found = 0;
828 struct mkvp_info *ptr; 836 struct mkvp_info *ptr;
@@ -831,7 +839,7 @@ static void mkvp_cache_update(u16 cardnr, u16 domain, u64 mkvp)
831 list_for_each_entry(ptr, &mkvp_list, list) { 839 list_for_each_entry(ptr, &mkvp_list, list) {
832 if (ptr->cardnr == cardnr && 840 if (ptr->cardnr == cardnr &&
833 ptr->domain == domain) { 841 ptr->domain == domain) {
834 ptr->mkvp = mkvp; 842 memcpy(ptr->mkvp, mkvp, 2 * sizeof(u64));
835 found = 1; 843 found = 1;
836 break; 844 break;
837 } 845 }
@@ -844,7 +852,7 @@ static void mkvp_cache_update(u16 cardnr, u16 domain, u64 mkvp)
844 } 852 }
845 ptr->cardnr = cardnr; 853 ptr->cardnr = cardnr;
846 ptr->domain = domain; 854 ptr->domain = domain;
847 ptr->mkvp = mkvp; 855 memcpy(ptr->mkvp, mkvp, 2 * sizeof(u64));
848 list_add(&ptr->list, &mkvp_list); 856 list_add(&ptr->list, &mkvp_list);
849 } 857 }
850 spin_unlock_bh(&mkvp_list_lock); 858 spin_unlock_bh(&mkvp_list_lock);
@@ -888,8 +896,8 @@ int pkey_findcard(const struct pkey_seckey *seckey,
888 struct secaeskeytoken *t = (struct secaeskeytoken *) seckey; 896 struct secaeskeytoken *t = (struct secaeskeytoken *) seckey;
889 struct zcrypt_device_matrix *device_matrix; 897 struct zcrypt_device_matrix *device_matrix;
890 u16 card, dom; 898 u16 card, dom;
891 u64 mkvp; 899 u64 mkvp[2];
892 int i, rc; 900 int i, rc, oi = -1;
893 901
894 /* mkvp must not be zero */ 902 /* mkvp must not be zero */
895 if (t->mkvp == 0) 903 if (t->mkvp == 0)
@@ -910,14 +918,14 @@ int pkey_findcard(const struct pkey_seckey *seckey,
910 device_matrix->device[i].functions & 0x04) { 918 device_matrix->device[i].functions & 0x04) {
911 /* an enabled CCA Coprocessor card */ 919 /* an enabled CCA Coprocessor card */
912 /* try cached mkvp */ 920 /* try cached mkvp */
913 if (mkvp_cache_fetch(card, dom, &mkvp) == 0 && 921 if (mkvp_cache_fetch(card, dom, mkvp) == 0 &&
914 t->mkvp == mkvp) { 922 t->mkvp == mkvp[0]) {
915 if (!verify) 923 if (!verify)
916 break; 924 break;
917 /* verify: fetch mkvp from adapter */ 925 /* verify: fetch mkvp from adapter */
918 if (fetch_mkvp(card, dom, &mkvp) == 0) { 926 if (fetch_mkvp(card, dom, mkvp) == 0) {
919 mkvp_cache_update(card, dom, mkvp); 927 mkvp_cache_update(card, dom, mkvp);
920 if (t->mkvp == mkvp) 928 if (t->mkvp == mkvp[0])
921 break; 929 break;
922 } 930 }
923 } 931 }
@@ -936,14 +944,21 @@ int pkey_findcard(const struct pkey_seckey *seckey,
936 card = AP_QID_CARD(device_matrix->device[i].qid); 944 card = AP_QID_CARD(device_matrix->device[i].qid);
937 dom = AP_QID_QUEUE(device_matrix->device[i].qid); 945 dom = AP_QID_QUEUE(device_matrix->device[i].qid);
938 /* fresh fetch mkvp from adapter */ 946 /* fresh fetch mkvp from adapter */
939 if (fetch_mkvp(card, dom, &mkvp) == 0) { 947 if (fetch_mkvp(card, dom, mkvp) == 0) {
940 mkvp_cache_update(card, dom, mkvp); 948 mkvp_cache_update(card, dom, mkvp);
941 if (t->mkvp == mkvp) 949 if (t->mkvp == mkvp[0])
942 break; 950 break;
951 if (t->mkvp == mkvp[1] && oi < 0)
952 oi = i;
943 } 953 }
944 } 954 }
955 if (i >= MAX_ZDEV_ENTRIES && oi >= 0) {
956 /* old mkvp matched, use this card then */
957 card = AP_QID_CARD(device_matrix->device[oi].qid);
958 dom = AP_QID_QUEUE(device_matrix->device[oi].qid);
959 }
945 } 960 }
946 if (i < MAX_ZDEV_ENTRIES) { 961 if (i < MAX_ZDEV_ENTRIES || oi >= 0) {
947 if (pcardnr) 962 if (pcardnr)
948 *pcardnr = card; 963 *pcardnr = card;
949 if (pdomain) 964 if (pdomain)
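
The pkey rework widens the cache to both master key verification patterns per
card/domain (mkvp[0] current, mkvp[1] old), so a secure key wrapped under the
previous master key can still be routed: the first card whose old MKVP matches
is remembered and used only when no current-MKVP match turns up. The fallback
search in outline (nr_devices, card_of(), dom_of() and key_mkvp are
hypothetical stand-ins for the device-matrix walk above):

    int i, oi = -1;
    u64 mkvp[2];    /* [0] = current, [1] = old master key pattern */

    for (i = 0; i < nr_devices; i++) {
            if (fetch_mkvp(card_of(i), dom_of(i), mkvp) != 0)
                    continue;
            if (key_mkvp == mkvp[0])
                    break;                  /* current key matches: done */
            if (key_mkvp == mkvp[1] && oi < 0)
                    oi = i;                 /* remember old-key fallback */
    }
    if (i >= nr_devices && oi >= 0)
            i = oi;                         /* settle for the old-MKVP card */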
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index e7addea8741b..d9561e39c3b2 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -961,7 +961,8 @@ int qeth_bridgeport_query_ports(struct qeth_card *card,
961int qeth_bridgeport_setrole(struct qeth_card *card, enum qeth_sbp_roles role); 961int qeth_bridgeport_setrole(struct qeth_card *card, enum qeth_sbp_roles role);
962int qeth_bridgeport_an_set(struct qeth_card *card, int enable); 962int qeth_bridgeport_an_set(struct qeth_card *card, int enable);
963int qeth_get_priority_queue(struct qeth_card *, struct sk_buff *, int, int); 963int qeth_get_priority_queue(struct qeth_card *, struct sk_buff *, int, int);
964int qeth_get_elements_no(struct qeth_card *, struct sk_buff *, int); 964int qeth_get_elements_no(struct qeth_card *card, struct sk_buff *skb,
965 int extra_elems, int data_offset);
965int qeth_get_elements_for_frags(struct sk_buff *); 966int qeth_get_elements_for_frags(struct sk_buff *);
966int qeth_do_send_packet_fast(struct qeth_card *, struct qeth_qdio_out_q *, 967int qeth_do_send_packet_fast(struct qeth_card *, struct qeth_qdio_out_q *,
967 struct sk_buff *, struct qeth_hdr *, int, int, int); 968 struct sk_buff *, struct qeth_hdr *, int, int, int);
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 315d8a2db7c0..9a5f99ccb122 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -3837,6 +3837,7 @@ EXPORT_SYMBOL_GPL(qeth_get_elements_for_frags);
3837 * @card: qeth card structure, to check max. elems. 3837 * @card: qeth card structure, to check max. elems.
3838 * @skb: SKB address 3838 * @skb: SKB address
3839 * @extra_elems: extra elems needed, to check against max. 3839 * @extra_elems: extra elems needed, to check against max.
3840 * @data_offset: range starts at skb->data + data_offset
3840 * 3841 *
3841 * Returns the number of pages, and thus QDIO buffer elements, needed to cover 3842 * Returns the number of pages, and thus QDIO buffer elements, needed to cover
3842 * skb data, including linear part and fragments. Checks if the result plus 3843 * skb data, including linear part and fragments. Checks if the result plus
@@ -3844,10 +3845,10 @@ EXPORT_SYMBOL_GPL(qeth_get_elements_for_frags);
3844 * Note: extra_elems is not included in the returned result. 3845 * Note: extra_elems is not included in the returned result.
3845 */ 3846 */
3846int qeth_get_elements_no(struct qeth_card *card, 3847int qeth_get_elements_no(struct qeth_card *card,
3847 struct sk_buff *skb, int extra_elems) 3848 struct sk_buff *skb, int extra_elems, int data_offset)
3848{ 3849{
3849 int elements = qeth_get_elements_for_range( 3850 int elements = qeth_get_elements_for_range(
3850 (addr_t)skb->data, 3851 (addr_t)skb->data + data_offset,
3851 (addr_t)skb->data + skb_headlen(skb)) + 3852 (addr_t)skb->data + skb_headlen(skb)) +
3852 qeth_get_elements_for_frags(skb); 3853 qeth_get_elements_for_frags(skb);
3853 3854
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index bea483307618..af4e6a639fec 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -849,7 +849,7 @@ static int qeth_l2_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
849 * chaining we can not send long frag lists 849 * chaining we can not send long frag lists
850 */ 850 */
851 if ((card->info.type != QETH_CARD_TYPE_IQD) && 851 if ((card->info.type != QETH_CARD_TYPE_IQD) &&
852 !qeth_get_elements_no(card, new_skb, 0)) { 852 !qeth_get_elements_no(card, new_skb, 0, 0)) {
853 int lin_rc = skb_linearize(new_skb); 853 int lin_rc = skb_linearize(new_skb);
854 854
855 if (card->options.performance_stats) { 855 if (card->options.performance_stats) {
@@ -894,7 +894,8 @@ static int qeth_l2_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
894 } 894 }
895 } 895 }
896 896
897 elements = qeth_get_elements_no(card, new_skb, elements_needed); 897 elements = qeth_get_elements_no(card, new_skb, elements_needed,
898 (data_offset > 0) ? data_offset : 0);
898 if (!elements) { 899 if (!elements) {
899 if (data_offset >= 0) 900 if (data_offset >= 0)
900 kmem_cache_free(qeth_core_header_cache, hdr); 901 kmem_cache_free(qeth_core_header_cache, hdr);
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 06d0addcc058..653f0fb76573 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -2609,17 +2609,13 @@ static void qeth_l3_fill_af_iucv_hdr(struct qeth_card *card,
2609 char daddr[16]; 2609 char daddr[16];
2610 struct af_iucv_trans_hdr *iucv_hdr; 2610 struct af_iucv_trans_hdr *iucv_hdr;
2611 2611
2612 skb_pull(skb, 14);
2613 card->dev->header_ops->create(skb, card->dev, 0,
2614 card->dev->dev_addr, card->dev->dev_addr,
2615 card->dev->addr_len);
2616 skb_pull(skb, 14);
2617 iucv_hdr = (struct af_iucv_trans_hdr *)skb->data;
2618 memset(hdr, 0, sizeof(struct qeth_hdr)); 2612 memset(hdr, 0, sizeof(struct qeth_hdr));
2619 hdr->hdr.l3.id = QETH_HEADER_TYPE_LAYER3; 2613 hdr->hdr.l3.id = QETH_HEADER_TYPE_LAYER3;
2620 hdr->hdr.l3.ext_flags = 0; 2614 hdr->hdr.l3.ext_flags = 0;
2621 hdr->hdr.l3.length = skb->len; 2615 hdr->hdr.l3.length = skb->len - ETH_HLEN;
2622 hdr->hdr.l3.flags = QETH_HDR_IPV6 | QETH_CAST_UNICAST; 2616 hdr->hdr.l3.flags = QETH_HDR_IPV6 | QETH_CAST_UNICAST;
2617
2618 iucv_hdr = (struct af_iucv_trans_hdr *) (skb->data + ETH_HLEN);
2623 memset(daddr, 0, sizeof(daddr)); 2619 memset(daddr, 0, sizeof(daddr));
2624 daddr[0] = 0xfe; 2620 daddr[0] = 0xfe;
2625 daddr[1] = 0x80; 2621 daddr[1] = 0x80;
@@ -2823,10 +2819,7 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
2823 if ((card->info.type == QETH_CARD_TYPE_IQD) && 2819 if ((card->info.type == QETH_CARD_TYPE_IQD) &&
2824 !skb_is_nonlinear(skb)) { 2820 !skb_is_nonlinear(skb)) {
2825 new_skb = skb; 2821 new_skb = skb;
2826 if (new_skb->protocol == ETH_P_AF_IUCV) 2822 data_offset = ETH_HLEN;
2827 data_offset = 0;
2828 else
2829 data_offset = ETH_HLEN;
2830 hdr = kmem_cache_alloc(qeth_core_header_cache, GFP_ATOMIC); 2823 hdr = kmem_cache_alloc(qeth_core_header_cache, GFP_ATOMIC);
2831 if (!hdr) 2824 if (!hdr)
2832 goto tx_drop; 2825 goto tx_drop;
@@ -2867,7 +2860,7 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
2867 */ 2860 */
2868 if ((card->info.type != QETH_CARD_TYPE_IQD) && 2861 if ((card->info.type != QETH_CARD_TYPE_IQD) &&
2869 ((use_tso && !qeth_l3_get_elements_no_tso(card, new_skb, 1)) || 2862 ((use_tso && !qeth_l3_get_elements_no_tso(card, new_skb, 1)) ||
2870 (!use_tso && !qeth_get_elements_no(card, new_skb, 0)))) { 2863 (!use_tso && !qeth_get_elements_no(card, new_skb, 0, 0)))) {
2871 int lin_rc = skb_linearize(new_skb); 2864 int lin_rc = skb_linearize(new_skb);
2872 2865
2873 if (card->options.performance_stats) { 2866 if (card->options.performance_stats) {
@@ -2909,7 +2902,8 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
2909 2902
2910 elements = use_tso ? 2903 elements = use_tso ?
2911 qeth_l3_get_elements_no_tso(card, new_skb, hdr_elements) : 2904 qeth_l3_get_elements_no_tso(card, new_skb, hdr_elements) :
2912 qeth_get_elements_no(card, new_skb, hdr_elements); 2905 qeth_get_elements_no(card, new_skb, hdr_elements,
2906 (data_offset > 0) ? data_offset : 0);
2913 if (!elements) { 2907 if (!elements) {
2914 if (data_offset >= 0) 2908 if (data_offset >= 0)
2915 kmem_cache_free(qeth_core_header_cache, hdr); 2909 kmem_cache_free(qeth_core_header_cache, hdr);
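
qeth_get_elements_no() grows a data_offset argument so the element count for
the linear part starts at skb->data + data_offset. On the IQD path the first
ETH_HLEN bytes travel in the separately allocated header element, so counting
them again could overstate the needed buffer elements by one page. The
underlying page arithmetic, sketched:

    #include <linux/pfn.h>

    /* Buffer elements (pages) needed to cover [from, to); a sketch of
     * what qeth_get_elements_for_range() computes. */
    static int example_elements_for_range(unsigned long from, unsigned long to)
    {
            return PFN_UP(to) - PFN_DOWN(from);
    }

Called with from = (addr_t)skb->data + data_offset, the bytes handed over in
the header element no longer contribute to the count.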
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index d036a806f31c..d281492009fb 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -1690,9 +1690,6 @@ struct aac_dev
1690#define aac_adapter_sync_cmd(dev, command, p1, p2, p3, p4, p5, p6, status, r1, r2, r3, r4) \ 1690#define aac_adapter_sync_cmd(dev, command, p1, p2, p3, p4, p5, p6, status, r1, r2, r3, r4) \
1691 (dev)->a_ops.adapter_sync_cmd(dev, command, p1, p2, p3, p4, p5, p6, status, r1, r2, r3, r4) 1691 (dev)->a_ops.adapter_sync_cmd(dev, command, p1, p2, p3, p4, p5, p6, status, r1, r2, r3, r4)
1692 1692
1693#define aac_adapter_check_health(dev) \
1694 (dev)->a_ops.adapter_check_health(dev)
1695
1696#define aac_adapter_restart(dev, bled, reset_type) \ 1693#define aac_adapter_restart(dev, bled, reset_type) \
1697 ((dev)->a_ops.adapter_restart(dev, bled, reset_type)) 1694 ((dev)->a_ops.adapter_restart(dev, bled, reset_type))
1698 1695
@@ -2615,6 +2612,14 @@ static inline unsigned int cap_to_cyls(sector_t capacity, unsigned divisor)
2615 return capacity; 2612 return capacity;
2616} 2613}
2617 2614
2615static inline int aac_adapter_check_health(struct aac_dev *dev)
2616{
2617 if (unlikely(pci_channel_offline(dev->pdev)))
2618 return -1;
2619
2620 return (dev)->a_ops.adapter_check_health(dev);
2621}
2622
2618/* SCp.phase values */ 2623/* SCp.phase values */
2619#define AAC_OWNER_MIDLEVEL 0x101 2624#define AAC_OWNER_MIDLEVEL 0x101
2620#define AAC_OWNER_LOWLEVEL 0x102 2625#define AAC_OWNER_LOWLEVEL 0x102
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index a3ad04293487..1f4918355fdb 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -1873,7 +1873,8 @@ int aac_check_health(struct aac_dev * aac)
1873 spin_unlock_irqrestore(&aac->fib_lock, flagv); 1873 spin_unlock_irqrestore(&aac->fib_lock, flagv);
1874 1874
1875 if (BlinkLED < 0) { 1875 if (BlinkLED < 0) {
1876 printk(KERN_ERR "%s: Host adapter dead %d\n", aac->name, BlinkLED); 1876 printk(KERN_ERR "%s: Host adapter is dead (or got a PCI error) %d\n",
1877 aac->name, BlinkLED);
1877 goto out; 1878 goto out;
1878 } 1879 }
1879 1880
@@ -2056,7 +2057,6 @@ static int fillup_pools(struct aac_dev *dev, struct hw_fib **hw_fib_pool,
2056{ 2057{
2057 struct hw_fib **hw_fib_p; 2058 struct hw_fib **hw_fib_p;
2058 struct fib **fib_p; 2059 struct fib **fib_p;
2059 int rcode = 1;
2060 2060
2061 hw_fib_p = hw_fib_pool; 2061 hw_fib_p = hw_fib_pool;
2062 fib_p = fib_pool; 2062 fib_p = fib_pool;
@@ -2074,11 +2074,11 @@ static int fillup_pools(struct aac_dev *dev, struct hw_fib **hw_fib_pool,
2074 } 2074 }
2075 } 2075 }
2076 2076
2077 /*
2078 * Get the actual number of allocated fibs
2079 */
2077 num = hw_fib_p - hw_fib_pool; 2080 num = hw_fib_p - hw_fib_pool;
2078 if (!num) 2081 return num;
2079 rcode = 0;
2080
2081 return rcode;
2082} 2082}
2083 2083
2084static void wakeup_fibctx_threads(struct aac_dev *dev, 2084static void wakeup_fibctx_threads(struct aac_dev *dev,
@@ -2186,7 +2186,6 @@ static void aac_process_events(struct aac_dev *dev)
2186 struct fib *fib; 2186 struct fib *fib;
2187 unsigned long flags; 2187 unsigned long flags;
2188 spinlock_t *t_lock; 2188 spinlock_t *t_lock;
2189 unsigned int rcode;
2190 2189
2191 t_lock = dev->queues->queue[HostNormCmdQueue].lock; 2190 t_lock = dev->queues->queue[HostNormCmdQueue].lock;
2192 spin_lock_irqsave(t_lock, flags); 2191 spin_lock_irqsave(t_lock, flags);
@@ -2269,8 +2268,8 @@ static void aac_process_events(struct aac_dev *dev)
2269 * Fill up fib pointer pools with actual fibs 2268 * Fill up fib pointer pools with actual fibs
2270 * and hw_fibs 2269 * and hw_fibs
2271 */ 2270 */
2272 rcode = fillup_pools(dev, hw_fib_pool, fib_pool, num); 2271 num = fillup_pools(dev, hw_fib_pool, fib_pool, num);
2273 if (!rcode) 2272 if (!num)
2274 goto free_mem; 2273 goto free_mem;
2275 2274
2276 /* 2275 /*
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index 48e200102221..c01b47e5b55a 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -113,7 +113,7 @@ struct alua_queue_data {
113#define ALUA_POLICY_SWITCH_ALL 1 113#define ALUA_POLICY_SWITCH_ALL 1
114 114
115static void alua_rtpg_work(struct work_struct *work); 115static void alua_rtpg_work(struct work_struct *work);
116static void alua_rtpg_queue(struct alua_port_group *pg, 116static bool alua_rtpg_queue(struct alua_port_group *pg,
117 struct scsi_device *sdev, 117 struct scsi_device *sdev,
118 struct alua_queue_data *qdata, bool force); 118 struct alua_queue_data *qdata, bool force);
119static void alua_check(struct scsi_device *sdev, bool force); 119static void alua_check(struct scsi_device *sdev, bool force);
@@ -862,7 +862,13 @@ static void alua_rtpg_work(struct work_struct *work)
862 kref_put(&pg->kref, release_port_group); 862 kref_put(&pg->kref, release_port_group);
863} 863}
864 864
865static void alua_rtpg_queue(struct alua_port_group *pg, 865/**
866 * alua_rtpg_queue() - cause RTPG to be submitted asynchronously
867 *
868 * Returns true if and only if alua_rtpg_work() will be called asynchronously.
869 * That function is responsible for calling @qdata->fn().
870 */
871static bool alua_rtpg_queue(struct alua_port_group *pg,
866 struct scsi_device *sdev, 872 struct scsi_device *sdev,
867 struct alua_queue_data *qdata, bool force) 873 struct alua_queue_data *qdata, bool force)
868{ 874{
@@ -870,8 +876,8 @@ static void alua_rtpg_queue(struct alua_port_group *pg,
870 unsigned long flags; 876 unsigned long flags;
871 struct workqueue_struct *alua_wq = kaluad_wq; 877 struct workqueue_struct *alua_wq = kaluad_wq;
872 878
873 if (!pg) 879 if (WARN_ON_ONCE(!pg) || scsi_device_get(sdev))
874 return; 880 return false;
875 881
876 spin_lock_irqsave(&pg->lock, flags); 882 spin_lock_irqsave(&pg->lock, flags);
877 if (qdata) { 883 if (qdata) {
@@ -884,14 +890,12 @@ static void alua_rtpg_queue(struct alua_port_group *pg,
884 pg->flags |= ALUA_PG_RUN_RTPG; 890 pg->flags |= ALUA_PG_RUN_RTPG;
885 kref_get(&pg->kref); 891 kref_get(&pg->kref);
886 pg->rtpg_sdev = sdev; 892 pg->rtpg_sdev = sdev;
887 scsi_device_get(sdev);
888 start_queue = 1; 893 start_queue = 1;
889 } else if (!(pg->flags & ALUA_PG_RUN_RTPG) && force) { 894 } else if (!(pg->flags & ALUA_PG_RUN_RTPG) && force) {
890 pg->flags |= ALUA_PG_RUN_RTPG; 895 pg->flags |= ALUA_PG_RUN_RTPG;
891 /* Do not queue if the worker is already running */ 896 /* Do not queue if the worker is already running */
892 if (!(pg->flags & ALUA_PG_RUNNING)) { 897 if (!(pg->flags & ALUA_PG_RUNNING)) {
893 kref_get(&pg->kref); 898 kref_get(&pg->kref);
894 sdev = NULL;
895 start_queue = 1; 899 start_queue = 1;
896 } 900 }
897 } 901 }
@@ -900,13 +904,17 @@ static void alua_rtpg_queue(struct alua_port_group *pg,
900 alua_wq = kaluad_sync_wq; 904 alua_wq = kaluad_sync_wq;
901 spin_unlock_irqrestore(&pg->lock, flags); 905 spin_unlock_irqrestore(&pg->lock, flags);
902 906
903 if (start_queue && 907 if (start_queue) {
904 !queue_delayed_work(alua_wq, &pg->rtpg_work, 908 if (queue_delayed_work(alua_wq, &pg->rtpg_work,
905 msecs_to_jiffies(ALUA_RTPG_DELAY_MSECS))) { 909 msecs_to_jiffies(ALUA_RTPG_DELAY_MSECS)))
906 if (sdev) 910 sdev = NULL;
907 scsi_device_put(sdev); 911 else
908 kref_put(&pg->kref, release_port_group); 912 kref_put(&pg->kref, release_port_group);
909 } 913 }
914 if (sdev)
915 scsi_device_put(sdev);
916
917 return true;
910} 918}
911 919
912/* 920/*
@@ -1007,11 +1015,13 @@ static int alua_activate(struct scsi_device *sdev,
1007 mutex_unlock(&h->init_mutex); 1015 mutex_unlock(&h->init_mutex);
1008 goto out; 1016 goto out;
1009 } 1017 }
1010 fn = NULL;
1011 rcu_read_unlock(); 1018 rcu_read_unlock();
1012 mutex_unlock(&h->init_mutex); 1019 mutex_unlock(&h->init_mutex);
1013 1020
1014 alua_rtpg_queue(pg, sdev, qdata, true); 1021 if (alua_rtpg_queue(pg, sdev, qdata, true))
1022 fn = NULL;
1023 else
1024 err = SCSI_DH_DEV_OFFLINED;
1015 kref_put(&pg->kref, release_port_group); 1025 kref_put(&pg->kref, release_port_group);
1016out: 1026out:
1017 if (fn) 1027 if (fn)
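
alua_rtpg_queue() now returns whether alua_rtpg_work() will actually run (and
hence call @qdata->fn()), and takes its scsi_device reference up front;
alua_activate() turns a false return into SCSI_DH_DEV_OFFLINED instead of
leaking the completion callback. The reference hand-off, simplified from the
hunks above:

    /* Ownership of the sdev reference passes to the work item only when
     * queueing succeeds; otherwise it is dropped right here. */
    if (scsi_device_get(sdev))
            return false;                   /* device being torn down */

    /* ... set ALUA_PG_RUN_RTPG etc. under pg->lock ... */

    if (queue_delayed_work(alua_wq, &pg->rtpg_work,
                           msecs_to_jiffies(ALUA_RTPG_DELAY_MSECS)))
            sdev = NULL;                    /* worker now owns the ref */
    if (sdev)
            scsi_device_put(sdev);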
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 0d0be7754a65..9d659aaace15 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -3885,6 +3885,7 @@ static int hpsa_update_device_info(struct ctlr_info *h,
3885 if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC) 3885 if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC)
3886 hpsa_get_ioaccel_status(h, scsi3addr, this_device); 3886 hpsa_get_ioaccel_status(h, scsi3addr, this_device);
3887 volume_offline = hpsa_volume_offline(h, scsi3addr); 3887 volume_offline = hpsa_volume_offline(h, scsi3addr);
3888 this_device->volume_offline = volume_offline;
3888 if (volume_offline == HPSA_LV_FAILED) { 3889 if (volume_offline == HPSA_LV_FAILED) {
3889 rc = HPSA_LV_FAILED; 3890 rc = HPSA_LV_FAILED;
3890 dev_err(&h->pdev->dev, 3891 dev_err(&h->pdev->dev,
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index b29afafc2885..5d5e272fd815 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -6293,7 +6293,12 @@ static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
6293 break; 6293 break;
6294 case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */ 6294 case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
6295 case IPR_IOASA_IR_DUAL_IOA_DISABLED: 6295 case IPR_IOASA_IR_DUAL_IOA_DISABLED:
6296 scsi_cmd->result |= (DID_PASSTHROUGH << 16); 6296 /*
6297 * exception: do not set DID_PASSTHROUGH on CHECK CONDITION
6298 * so SCSI mid-layer and upper layers handle it accordingly.
6299 */
6300 if (scsi_cmd->result != SAM_STAT_CHECK_CONDITION)
6301 scsi_cmd->result |= (DID_PASSTHROUGH << 16);
6297 break; 6302 break;
6298 case IPR_IOASC_BUS_WAS_RESET: 6303 case IPR_IOASC_BUS_WAS_RESET:
6299 case IPR_IOASC_BUS_WAS_RESET_BY_OTHER: 6304 case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 763f012fdeca..87f5e694dbed 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -221,7 +221,7 @@ static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc)
221 task->num_scatter = qc->n_elem; 221 task->num_scatter = qc->n_elem;
222 } else { 222 } else {
223 for_each_sg(qc->sg, sg, qc->n_elem, si) 223 for_each_sg(qc->sg, sg, qc->n_elem, si)
224 xfer += sg->length; 224 xfer += sg_dma_len(sg);
225 225
226 task->total_xfer_len = xfer; 226 task->total_xfer_len = xfer;
227 task->num_scatter = si; 227 task->num_scatter = si;
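
The libsas fix totals sg_dma_len() instead of sg->length: after dma_map_sg()
an IOMMU may have merged or resized entries, so the mapped length of an element
is the one the hardware will actually transfer. The pattern:

    #include <linux/scatterlist.h>

    /* Sum the mapped lengths; valid only after a successful dma_map_sg(). */
    static unsigned int example_total_xfer(struct scatterlist *sgl, int nents)
    {
            struct scatterlist *sg;
            unsigned int total = 0;
            int i;

            for_each_sg(sgl, sg, nents, i)
                    total += sg_dma_len(sg);

            return total;
    }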
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.h b/drivers/scsi/lpfc/lpfc_debugfs.h
index c05f56c3023f..7b7d314af0e0 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.h
+++ b/drivers/scsi/lpfc/lpfc_debugfs.h
@@ -44,14 +44,6 @@
44/* hbqinfo output buffer size */ 44/* hbqinfo output buffer size */
45#define LPFC_HBQINFO_SIZE 8192 45#define LPFC_HBQINFO_SIZE 8192
46 46
47enum {
48 DUMP_FCP,
49 DUMP_NVME,
50 DUMP_MBX,
51 DUMP_ELS,
52 DUMP_NVMELS,
53};
54
55/* nvmestat output buffer size */ 47/* nvmestat output buffer size */
56#define LPFC_NVMESTAT_SIZE 8192 48#define LPFC_NVMESTAT_SIZE 8192
57#define LPFC_NVMEKTIME_SIZE 8192 49#define LPFC_NVMEKTIME_SIZE 8192
@@ -283,8 +275,22 @@ struct lpfc_idiag {
283 struct lpfc_idiag_offset offset; 275 struct lpfc_idiag_offset offset;
284 void *ptr_private; 276 void *ptr_private;
285}; 277};
278
279#else
280
281#define lpfc_nvmeio_data(phba, fmt, arg...) \
282 no_printk(fmt, ##arg)
283
286#endif 284#endif
287 285
286enum {
287 DUMP_FCP,
288 DUMP_NVME,
289 DUMP_MBX,
290 DUMP_ELS,
291 DUMP_NVMELS,
292};
293
288/* Mask for discovery_trace */ 294/* Mask for discovery_trace */
289#define LPFC_DISC_TRC_ELS_CMD 0x1 /* Trace ELS commands */ 295#define LPFC_DISC_TRC_ELS_CMD 0x1 /* Trace ELS commands */
290#define LPFC_DISC_TRC_ELS_RSP 0x2 /* Trace ELS response */ 296#define LPFC_DISC_TRC_ELS_RSP 0x2 /* Trace ELS response */
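
Two build fixes in one hunk: the DUMP_* enum moves out of the
CONFIG_SCSI_LPFC_DEBUG_FS block because non-debugfs code references it, and a
no_printk() stub keeps lpfc_nvmeio_data() callers compiling, with their format
strings still type-checked, when debugfs support is off. The stub idiom in
general:

    #include <linux/printk.h>

    #ifdef CONFIG_EXAMPLE_DEBUG
    #define example_trace(ctx, fmt, arg...) \
            example_trace_real(ctx, fmt, ##arg)
    #else
    /* Compiles to nothing, but the arguments are still type-checked. */
    #define example_trace(ctx, fmt, arg...) \
            no_printk(fmt, ##arg)
    #endif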
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index d9c61d030034..a5ca37e45fb6 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -7968,7 +7968,8 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 			 did, vport->port_state, ndlp->nlp_flag);
 
 		phba->fc_stat.elsRcvPRLI++;
-		if (vport->port_state < LPFC_DISC_AUTH) {
+		if ((vport->port_state < LPFC_DISC_AUTH) &&
+		    (vport->fc_flag & FC_FABRIC)) {
 			rjt_err = LSRJT_UNABLE_TPC;
 			rjt_exp = LSEXP_NOTHING_MORE;
 			break;
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
index 7ca868f394da..acba1b67e505 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.c
+++ b/drivers/scsi/lpfc/lpfc_nvmet.c
@@ -520,7 +520,7 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
 	struct lpfc_hba *phba = ctxp->phba;
 	struct lpfc_iocbq *nvmewqeq;
 	unsigned long iflags;
-	int rc, id;
+	int rc;
 
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
 	if (phba->ktime_on) {
@@ -530,7 +530,7 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
 			ctxp->ts_nvme_data = ktime_get_ns();
 	}
 	if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
-		id = smp_processor_id();
+		int id = smp_processor_id();
 		ctxp->cpu = id;
 		if (id < LPFC_CHECK_CPU_CNT)
 			phba->cpucheck_xmt_io[id]++;
diff --git a/drivers/scsi/qedf/qedf_fip.c b/drivers/scsi/qedf/qedf_fip.c
index ed58b9104f58..e10b91cc3c62 100644
--- a/drivers/scsi/qedf/qedf_fip.c
+++ b/drivers/scsi/qedf/qedf_fip.c
@@ -99,7 +99,8 @@ static void qedf_fcoe_process_vlan_resp(struct qedf_ctx *qedf,
 		qedf_set_vlan_id(qedf, vid);
 
 		/* Inform waiter that it's ok to call fcoe_ctlr_link up() */
-		complete(&qedf->fipvlan_compl);
+		if (!completion_done(&qedf->fipvlan_compl))
+			complete(&qedf->fipvlan_compl);
 	}
 }
 
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index 8e2a160490e6..cceddd995a4b 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -2803,6 +2803,7 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
 	atomic_set(&qedf->num_offloads, 0);
 	qedf->stop_io_on_error = false;
 	pci_set_drvdata(pdev, qedf);
+	init_completion(&qedf->fipvlan_compl);
 
 	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_INFO,
 	    "QLogic FastLinQ FCoE Module qedf %s, "
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index 8e3d92807cb8..92775a8b74b1 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -2007,6 +2007,7 @@ static void qedi_remove(struct pci_dev *pdev)
 
 static struct pci_device_id qedi_pci_tbl[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, 0x165E) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, 0x8084) },
 	{ 0 },
 };
 MODULE_DEVICE_TABLE(pci, qedi_pci_tbl);
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 41d5b09f7326..83d61d2142e9 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -1160,8 +1160,13 @@ static inline
 uint32_t qla2x00_isp_reg_stat(struct qla_hw_data *ha)
 {
 	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+	struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
 
-	return ((RD_REG_DWORD(&reg->host_status)) == ISP_REG_DISCONNECT);
+	if (IS_P3P_TYPE(ha))
+		return ((RD_REG_DWORD(&reg82->host_int)) == ISP_REG_DISCONNECT);
+	else
+		return ((RD_REG_DWORD(&reg->host_status)) ==
+			ISP_REG_DISCONNECT);
 }
 
 /**************************************************************************
@@ -1651,7 +1656,8 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
 		/* Don't abort commands in adapter during EEH
 		 * recovery as it's not accessible/responding.
 		 */
-		if (GET_CMD_SP(sp) && !ha->flags.eeh_busy) {
+		if (GET_CMD_SP(sp) && !ha->flags.eeh_busy &&
+		    (sp->type == SRB_SCSI_CMD)) {
 			/* Get a reference to the sp and drop the lock.
 			 * The reference ensures this sp->done() call
 			 * - and not the call in qla2xxx_eh_abort() -
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 19125d72f322..e5a2d590a104 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -496,7 +496,7 @@ static void scsi_run_queue(struct request_queue *q)
 		scsi_starved_list_run(sdev->host);
 
 	if (q->mq_ops)
-		blk_mq_start_stopped_hw_queues(q, false);
+		blk_mq_run_hw_queues(q, false);
 	else
 		blk_run_queue(q);
 }
@@ -667,7 +667,7 @@ static bool scsi_end_request(struct request *req, int error,
 		    !list_empty(&sdev->host->starved_list))
 			kblockd_schedule_work(&sdev->requeue_work);
 		else
-			blk_mq_start_stopped_hw_queues(q, true);
+			blk_mq_run_hw_queues(q, true);
 	} else {
 		unsigned long flags;
 
@@ -1974,7 +1974,7 @@ out:
 	case BLK_MQ_RQ_QUEUE_BUSY:
 		if (atomic_read(&sdev->device_busy) == 0 &&
 		    !scsi_device_blocked(sdev))
-			blk_mq_delay_queue(hctx, SCSI_QUEUE_DELAY);
+			blk_mq_delay_run_hw_queue(hctx, SCSI_QUEUE_DELAY);
 		break;
 	case BLK_MQ_RQ_QUEUE_ERROR:
 		/*
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index fcfeddc79331..35ad5e8a31ab 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -2102,6 +2102,22 @@ static void read_capacity_error(struct scsi_disk *sdkp, struct scsi_device *sdp,
 
 #define READ_CAPACITY_RETRIES_ON_RESET 10
 
+/*
+ * Ensure that we don't overflow sector_t when CONFIG_LBDAF is not set
+ * and the reported logical block size is bigger than 512 bytes. Note
+ * that last_sector is a u64 and therefore logical_to_sectors() is not
+ * applicable.
+ */
+static bool sd_addressable_capacity(u64 lba, unsigned int sector_size)
+{
+	u64 last_sector = (lba + 1ULL) << (ilog2(sector_size) - 9);
+
+	if (sizeof(sector_t) == 4 && last_sector > U32_MAX)
+		return false;
+
+	return true;
+}
+
 static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
 			    unsigned char *buffer)
 {
@@ -2167,7 +2183,7 @@ static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
 		return -ENODEV;
 	}
 
-	if ((sizeof(sdkp->capacity) == 4) && (lba >= 0xffffffffULL)) {
+	if (!sd_addressable_capacity(lba, sector_size)) {
 		sd_printk(KERN_ERR, sdkp, "Too big for this kernel. Use a "
 			  "kernel compiled with support for large block "
 			  "devices.\n");
@@ -2256,7 +2272,7 @@ static int read_capacity_10(struct scsi_disk *sdkp, struct scsi_device *sdp,
 		return sector_size;
 	}
 
-	if ((sizeof(sdkp->capacity) == 4) && (lba == 0xffffffff)) {
+	if (!sd_addressable_capacity(lba, sector_size)) {
 		sd_printk(KERN_ERR, sdkp, "Too big for this kernel. Use a "
 			  "kernel compiled with support for large block "
 			  "devices.\n");
@@ -2956,7 +2972,8 @@ static int sd_revalidate_disk(struct gendisk *disk)
 		q->limits.io_opt = logical_to_bytes(sdp, sdkp->opt_xfer_blocks);
 		rw_max = logical_to_sectors(sdp, sdkp->opt_xfer_blocks);
 	} else
-		rw_max = BLK_DEF_MAX_SECTORS;
+		rw_max = min_not_zero(logical_to_sectors(sdp, dev_max),
+				      (sector_t)BLK_DEF_MAX_SECTORS);
 
 	/* Combine with controller limits */
 	q->limits.max_sectors = min(rw_max, queue_max_hw_sectors(q));
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 29b86505f796..225abaad4d1c 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -996,6 +996,8 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
 		result = get_user(val, ip);
 		if (result)
 			return result;
+		if (val > SG_MAX_CDB_SIZE)
+			return -ENOMEM;
 		sfp->next_cmd_len = (val > 0) ? val : 0;
 		return 0;
 	case SG_GET_VERSION_NUM:
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 0b29b9329b1c..a8f630213a1a 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -836,6 +836,7 @@ static void get_capabilities(struct scsi_cd *cd)
 	unsigned char *buffer;
 	struct scsi_mode_data data;
 	struct scsi_sense_hdr sshdr;
+	unsigned int ms_len = 128;
 	int rc, n;
 
 	static const char *loadmech[] =
@@ -862,10 +863,11 @@ static void get_capabilities(struct scsi_cd *cd)
 	scsi_test_unit_ready(cd->device, SR_TIMEOUT, MAX_RETRIES, &sshdr);
 
 	/* ask for mode page 0x2a */
-	rc = scsi_mode_sense(cd->device, 0, 0x2a, buffer, 128,
+	rc = scsi_mode_sense(cd->device, 0, 0x2a, buffer, ms_len,
 			     SR_TIMEOUT, 3, &data, NULL);
 
-	if (!scsi_status_is_good(rc)) {
+	if (!scsi_status_is_good(rc) || data.length > ms_len ||
+	    data.header_length + data.block_descriptor_length > data.length) {
 		/* failed, drive doesn't have capabilities mode page */
 		cd->cdi.speed = 1;
 		cd->cdi.mask |= (CDC_CD_R | CDC_CD_RW | CDC_DVD_R |
diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c
index a72a4ba78125..8e5e6c04c035 100644
--- a/drivers/scsi/ufs/ufshcd-pltfrm.c
+++ b/drivers/scsi/ufs/ufshcd-pltfrm.c
@@ -309,8 +309,8 @@ int ufshcd_pltfrm_init(struct platform_device *pdev,
 
 	mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	mmio_base = devm_ioremap_resource(dev, mem_res);
-	if (IS_ERR(*(void **)&mmio_base)) {
-		err = PTR_ERR(*(void **)&mmio_base);
+	if (IS_ERR(mmio_base)) {
+		err = PTR_ERR(mmio_base);
 		goto out;
 	}
 
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index e8c26e6e6237..096e95b911bd 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -4662,8 +4662,6 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
 		}
 		if (ufshcd_is_clkscaling_supported(hba))
 			hba->clk_scaling.active_reqs--;
-		if (ufshcd_is_clkscaling_supported(hba))
-			hba->clk_scaling.active_reqs--;
 	}
 
 	/* clear corresponding bits of completed commands */
diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
index 7cbad0d45b9c..6ba270e0494d 100644
--- a/drivers/staging/android/ashmem.c
+++ b/drivers/staging/android/ashmem.c
@@ -409,6 +409,7 @@ static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
 			ret = PTR_ERR(vmfile);
 			goto out;
 		}
+		vmfile->f_mode |= FMODE_LSEEK;
 		asma->file = vmfile;
 	}
 	get_file(asma->file);
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index f45115fce4eb..95a7f1648c00 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -1020,10 +1020,10 @@ static const struct dma_buf_ops dma_buf_ops = {
 	.release = ion_dma_buf_release,
 	.begin_cpu_access = ion_dma_buf_begin_cpu_access,
 	.end_cpu_access = ion_dma_buf_end_cpu_access,
-	.kmap_atomic = ion_dma_buf_kmap,
-	.kunmap_atomic = ion_dma_buf_kunmap,
-	.kmap = ion_dma_buf_kmap,
-	.kunmap = ion_dma_buf_kunmap,
+	.map_atomic = ion_dma_buf_kmap,
+	.unmap_atomic = ion_dma_buf_kunmap,
+	.map = ion_dma_buf_kmap,
+	.unmap = ion_dma_buf_kunmap,
 };
 
 struct dma_buf *ion_share_dma_buf(struct ion_client *client,
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index a91802432f2f..e3f9ed3690b7 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -485,8 +485,7 @@ static void iscsit_get_rx_pdu(struct iscsi_conn *);
 
 int iscsit_queue_rsp(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
 {
-	iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
-	return 0;
+	return iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
 }
 EXPORT_SYMBOL(iscsit_queue_rsp);
 
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index bf40f03755dd..344e8448869c 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -1398,11 +1398,10 @@ static u32 lio_sess_get_initiator_sid(
 static int lio_queue_data_in(struct se_cmd *se_cmd)
 {
 	struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+	struct iscsi_conn *conn = cmd->conn;
 
 	cmd->i_state = ISTATE_SEND_DATAIN;
-	cmd->conn->conn_transport->iscsit_queue_data_in(cmd->conn, cmd);
-
-	return 0;
+	return conn->conn_transport->iscsit_queue_data_in(conn, cmd);
 }
 
 static int lio_write_pending(struct se_cmd *se_cmd)
@@ -1431,16 +1430,14 @@ static int lio_write_pending_status(struct se_cmd *se_cmd)
 static int lio_queue_status(struct se_cmd *se_cmd)
 {
 	struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+	struct iscsi_conn *conn = cmd->conn;
 
 	cmd->i_state = ISTATE_SEND_STATUS;
 
 	if (cmd->se_cmd.scsi_status || cmd->sense_reason) {
-		iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
-		return 0;
+		return iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
 	}
-	cmd->conn->conn_transport->iscsit_queue_status(cmd->conn, cmd);
-
-	return 0;
+	return conn->conn_transport->iscsit_queue_status(conn, cmd);
 }
 
 static void lio_queue_tm_rsp(struct se_cmd *se_cmd)
diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c
index e65bf78ceef3..fce627628200 100644
--- a/drivers/target/iscsi/iscsi_target_parameters.c
+++ b/drivers/target/iscsi/iscsi_target_parameters.c
@@ -782,22 +782,6 @@ static void iscsi_check_proposer_for_optional_reply(struct iscsi_param *param)
 	if (!strcmp(param->name, MAXRECVDATASEGMENTLENGTH))
 		SET_PSTATE_REPLY_OPTIONAL(param);
 	/*
-	 * The GlobalSAN iSCSI Initiator for MacOSX does
-	 * not respond to MaxBurstLength, FirstBurstLength,
-	 * DefaultTime2Wait or DefaultTime2Retain parameter keys.
-	 * So, we set them to 'reply optional' here, and assume the
-	 * the defaults from iscsi_parameters.h if the initiator
-	 * is not RFC compliant and the keys are not negotiated.
-	 */
-	if (!strcmp(param->name, MAXBURSTLENGTH))
-		SET_PSTATE_REPLY_OPTIONAL(param);
-	if (!strcmp(param->name, FIRSTBURSTLENGTH))
-		SET_PSTATE_REPLY_OPTIONAL(param);
-	if (!strcmp(param->name, DEFAULTTIME2WAIT))
-		SET_PSTATE_REPLY_OPTIONAL(param);
-	if (!strcmp(param->name, DEFAULTTIME2RETAIN))
-		SET_PSTATE_REPLY_OPTIONAL(param);
-	/*
 	 * Required for gPXE iSCSI boot client
 	 */
 	if (!strcmp(param->name, MAXCONNECTIONS))
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
index 5041a9c8bdcb..7d3e2fcc26a0 100644
--- a/drivers/target/iscsi/iscsi_target_util.c
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -567,7 +567,7 @@ static void iscsit_remove_cmd_from_immediate_queue(
 	}
 }
 
-void iscsit_add_cmd_to_response_queue(
+int iscsit_add_cmd_to_response_queue(
 	struct iscsi_cmd *cmd,
 	struct iscsi_conn *conn,
 	u8 state)
@@ -578,7 +578,7 @@ void iscsit_add_cmd_to_response_queue(
 	if (!qr) {
 		pr_err("Unable to allocate memory for"
 			" struct iscsi_queue_req\n");
-		return;
+		return -ENOMEM;
 	}
 	INIT_LIST_HEAD(&qr->qr_list);
 	qr->cmd = cmd;
@@ -590,6 +590,7 @@ void iscsit_add_cmd_to_response_queue(
 	spin_unlock_bh(&conn->response_queue_lock);
 
 	wake_up(&conn->queues_wq);
+	return 0;
 }
 
 struct iscsi_queue_req *iscsit_get_cmd_from_response_queue(struct iscsi_conn *conn)
@@ -737,21 +738,23 @@ void iscsit_free_cmd(struct iscsi_cmd *cmd, bool shutdown)
 {
 	struct se_cmd *se_cmd = NULL;
 	int rc;
+	bool op_scsi = false;
 	/*
 	 * Determine if a struct se_cmd is associated with
 	 * this struct iscsi_cmd.
 	 */
 	switch (cmd->iscsi_opcode) {
 	case ISCSI_OP_SCSI_CMD:
-		se_cmd = &cmd->se_cmd;
-		__iscsit_free_cmd(cmd, true, shutdown);
+		op_scsi = true;
 		/*
 		 * Fallthrough
 		 */
 	case ISCSI_OP_SCSI_TMFUNC:
-		rc = transport_generic_free_cmd(&cmd->se_cmd, shutdown);
-		if (!rc && shutdown && se_cmd && se_cmd->se_sess) {
-			__iscsit_free_cmd(cmd, true, shutdown);
+		se_cmd = &cmd->se_cmd;
+		__iscsit_free_cmd(cmd, op_scsi, shutdown);
+		rc = transport_generic_free_cmd(se_cmd, shutdown);
+		if (!rc && shutdown && se_cmd->se_sess) {
+			__iscsit_free_cmd(cmd, op_scsi, shutdown);
 			target_put_sess_cmd(se_cmd);
 		}
 		break;
diff --git a/drivers/target/iscsi/iscsi_target_util.h b/drivers/target/iscsi/iscsi_target_util.h
index 8ff08856516a..9e4197af8708 100644
--- a/drivers/target/iscsi/iscsi_target_util.h
+++ b/drivers/target/iscsi/iscsi_target_util.h
@@ -31,7 +31,7 @@ extern int iscsit_find_cmd_for_recovery(struct iscsi_session *, struct iscsi_cmd
 			struct iscsi_conn_recovery **, itt_t);
 extern void iscsit_add_cmd_to_immediate_queue(struct iscsi_cmd *, struct iscsi_conn *, u8);
 extern struct iscsi_queue_req *iscsit_get_cmd_from_immediate_queue(struct iscsi_conn *);
-extern void iscsit_add_cmd_to_response_queue(struct iscsi_cmd *, struct iscsi_conn *, u8);
+extern int iscsit_add_cmd_to_response_queue(struct iscsi_cmd *, struct iscsi_conn *, u8);
 extern struct iscsi_queue_req *iscsit_get_cmd_from_response_queue(struct iscsi_conn *);
 extern void iscsit_remove_cmd_from_tx_queues(struct iscsi_cmd *, struct iscsi_conn *);
 extern bool iscsit_conn_all_queues_empty(struct iscsi_conn *);
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index fd7c16a7ca6e..fc4a9c303d55 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -197,8 +197,7 @@ target_emulate_report_target_port_groups(struct se_cmd *cmd)
 		/*
 		 * Set the ASYMMETRIC ACCESS State
 		 */
-		buf[off++] |= (atomic_read(
-			&tg_pt_gp->tg_pt_gp_alua_access_state) & 0xff);
+		buf[off++] |= tg_pt_gp->tg_pt_gp_alua_access_state & 0xff;
 		/*
 		 * Set supported ASYMMETRIC ACCESS State bits
 		 */
@@ -710,7 +709,7 @@ target_alua_state_check(struct se_cmd *cmd)
 
 	spin_lock(&lun->lun_tg_pt_gp_lock);
 	tg_pt_gp = lun->lun_tg_pt_gp;
-	out_alua_state = atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state);
+	out_alua_state = tg_pt_gp->tg_pt_gp_alua_access_state;
 	nonop_delay_msecs = tg_pt_gp->tg_pt_gp_nonop_delay_msecs;
 
 	// XXX: keeps using tg_pt_gp witout reference after unlock
@@ -911,7 +910,7 @@ static int core_alua_write_tpg_metadata(
 }
 
 /*
- * Called with tg_pt_gp->tg_pt_gp_md_mutex held
+ * Called with tg_pt_gp->tg_pt_gp_transition_mutex held
  */
 static int core_alua_update_tpg_primary_metadata(
 	struct t10_alua_tg_pt_gp *tg_pt_gp)
@@ -934,7 +933,7 @@ static int core_alua_update_tpg_primary_metadata(
934 "alua_access_state=0x%02x\n" 933 "alua_access_state=0x%02x\n"
935 "alua_access_status=0x%02x\n", 934 "alua_access_status=0x%02x\n",
936 tg_pt_gp->tg_pt_gp_id, 935 tg_pt_gp->tg_pt_gp_id,
937 tg_pt_gp->tg_pt_gp_alua_pending_state, 936 tg_pt_gp->tg_pt_gp_alua_access_state,
938 tg_pt_gp->tg_pt_gp_alua_access_status); 937 tg_pt_gp->tg_pt_gp_alua_access_status);
939 938
940 snprintf(path, ALUA_METADATA_PATH_LEN, 939 snprintf(path, ALUA_METADATA_PATH_LEN,
@@ -1013,93 +1012,41 @@ static void core_alua_queue_state_change_ua(struct t10_alua_tg_pt_gp *tg_pt_gp)
 	spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
 }
 
-static void core_alua_do_transition_tg_pt_work(struct work_struct *work)
-{
-	struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(work,
-		struct t10_alua_tg_pt_gp, tg_pt_gp_transition_work);
-	struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
-	bool explicit = (tg_pt_gp->tg_pt_gp_alua_access_status ==
-			 ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG);
-
-	/*
-	 * Update the ALUA metadata buf that has been allocated in
-	 * core_alua_do_port_transition(), this metadata will be written
-	 * to struct file.
-	 *
-	 * Note that there is the case where we do not want to update the
-	 * metadata when the saved metadata is being parsed in userspace
-	 * when setting the existing port access state and access status.
-	 *
-	 * Also note that the failure to write out the ALUA metadata to
-	 * struct file does NOT affect the actual ALUA transition.
-	 */
-	if (tg_pt_gp->tg_pt_gp_write_metadata) {
-		mutex_lock(&tg_pt_gp->tg_pt_gp_md_mutex);
-		core_alua_update_tpg_primary_metadata(tg_pt_gp);
-		mutex_unlock(&tg_pt_gp->tg_pt_gp_md_mutex);
-	}
-	/*
-	 * Set the current primary ALUA access state to the requested new state
-	 */
-	atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
-		   tg_pt_gp->tg_pt_gp_alua_pending_state);
-
-	pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu"
-		" from primary access state %s to %s\n", (explicit) ? "explicit" :
-		"implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
-		tg_pt_gp->tg_pt_gp_id,
-		core_alua_dump_state(tg_pt_gp->tg_pt_gp_alua_previous_state),
-		core_alua_dump_state(tg_pt_gp->tg_pt_gp_alua_pending_state));
-
-	core_alua_queue_state_change_ua(tg_pt_gp);
-
-	spin_lock(&dev->t10_alua.tg_pt_gps_lock);
-	atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
-	spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
-
-	if (tg_pt_gp->tg_pt_gp_transition_complete)
-		complete(tg_pt_gp->tg_pt_gp_transition_complete);
-}
-
 static int core_alua_do_transition_tg_pt(
 	struct t10_alua_tg_pt_gp *tg_pt_gp,
 	int new_state,
 	int explicit)
 {
-	struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
-	DECLARE_COMPLETION_ONSTACK(wait);
+	int prev_state;
 
+	mutex_lock(&tg_pt_gp->tg_pt_gp_transition_mutex);
 	/* Nothing to be done here */
-	if (atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state) == new_state)
+	if (tg_pt_gp->tg_pt_gp_alua_access_state == new_state) {
+		mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex);
 		return 0;
+	}
 
-	if (explicit && new_state == ALUA_ACCESS_STATE_TRANSITION)
+	if (explicit && new_state == ALUA_ACCESS_STATE_TRANSITION) {
+		mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex);
 		return -EAGAIN;
-
-	/*
-	 * Flush any pending transitions
-	 */
-	if (!explicit)
-		flush_work(&tg_pt_gp->tg_pt_gp_transition_work);
+	}
 
 	/*
 	 * Save the old primary ALUA access state, and set the current state
 	 * to ALUA_ACCESS_STATE_TRANSITION.
 	 */
-	atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
-		   ALUA_ACCESS_STATE_TRANSITION);
+	prev_state = tg_pt_gp->tg_pt_gp_alua_access_state;
+	tg_pt_gp->tg_pt_gp_alua_access_state = ALUA_ACCESS_STATE_TRANSITION;
 	tg_pt_gp->tg_pt_gp_alua_access_status = (explicit) ?
 				ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG :
 				ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA;
 
 	core_alua_queue_state_change_ua(tg_pt_gp);
 
-	if (new_state == ALUA_ACCESS_STATE_TRANSITION)
+	if (new_state == ALUA_ACCESS_STATE_TRANSITION) {
+		mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex);
 		return 0;
-
-	tg_pt_gp->tg_pt_gp_alua_previous_state =
-		atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state);
-	tg_pt_gp->tg_pt_gp_alua_pending_state = new_state;
+	}
 
 	/*
 	 * Check for the optional ALUA primary state transition delay
@@ -1108,19 +1055,36 @@ static int core_alua_do_transition_tg_pt(
 		msleep_interruptible(tg_pt_gp->tg_pt_gp_trans_delay_msecs);
 
 	/*
-	 * Take a reference for workqueue item
+	 * Set the current primary ALUA access state to the requested new state
 	 */
-	spin_lock(&dev->t10_alua.tg_pt_gps_lock);
-	atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
-	spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
+	tg_pt_gp->tg_pt_gp_alua_access_state = new_state;
 
-	schedule_work(&tg_pt_gp->tg_pt_gp_transition_work);
-	if (explicit) {
-		tg_pt_gp->tg_pt_gp_transition_complete = &wait;
-		wait_for_completion(&wait);
-		tg_pt_gp->tg_pt_gp_transition_complete = NULL;
+	/*
+	 * Update the ALUA metadata buf that has been allocated in
+	 * core_alua_do_port_transition(), this metadata will be written
+	 * to struct file.
+	 *
+	 * Note that there is the case where we do not want to update the
+	 * metadata when the saved metadata is being parsed in userspace
+	 * when setting the existing port access state and access status.
+	 *
+	 * Also note that the failure to write out the ALUA metadata to
+	 * struct file does NOT affect the actual ALUA transition.
+	 */
+	if (tg_pt_gp->tg_pt_gp_write_metadata) {
+		core_alua_update_tpg_primary_metadata(tg_pt_gp);
 	}
 
+	pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu"
+		" from primary access state %s to %s\n", (explicit) ? "explicit" :
+		"implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
+		tg_pt_gp->tg_pt_gp_id,
+		core_alua_dump_state(prev_state),
+		core_alua_dump_state(new_state));
+
+	core_alua_queue_state_change_ua(tg_pt_gp);
+
+	mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex);
 	return 0;
 }
 
@@ -1685,14 +1649,12 @@ struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(struct se_device *dev,
 	}
 	INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_list);
 	INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_lun_list);
-	mutex_init(&tg_pt_gp->tg_pt_gp_md_mutex);
+	mutex_init(&tg_pt_gp->tg_pt_gp_transition_mutex);
 	spin_lock_init(&tg_pt_gp->tg_pt_gp_lock);
 	atomic_set(&tg_pt_gp->tg_pt_gp_ref_cnt, 0);
-	INIT_WORK(&tg_pt_gp->tg_pt_gp_transition_work,
-		  core_alua_do_transition_tg_pt_work);
 	tg_pt_gp->tg_pt_gp_dev = dev;
-	atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
-		   ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED);
+	tg_pt_gp->tg_pt_gp_alua_access_state =
+		ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED;
 	/*
 	 * Enable both explicit and implicit ALUA support by default
 	 */
@@ -1797,8 +1759,6 @@ void core_alua_free_tg_pt_gp(
 	dev->t10_alua.alua_tg_pt_gps_counter--;
 	spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
 
-	flush_work(&tg_pt_gp->tg_pt_gp_transition_work);
-
 	/*
 	 * Allow a struct t10_alua_tg_pt_gp_member * referenced by
 	 * core_alua_get_tg_pt_gp_by_name() in
@@ -1938,8 +1898,8 @@ ssize_t core_alua_show_tg_pt_gp_info(struct se_lun *lun, char *page)
1938 "Primary Access Status: %s\nTG Port Secondary Access" 1898 "Primary Access Status: %s\nTG Port Secondary Access"
1939 " State: %s\nTG Port Secondary Access Status: %s\n", 1899 " State: %s\nTG Port Secondary Access Status: %s\n",
1940 config_item_name(tg_pt_ci), tg_pt_gp->tg_pt_gp_id, 1900 config_item_name(tg_pt_ci), tg_pt_gp->tg_pt_gp_id,
1941 core_alua_dump_state(atomic_read( 1901 core_alua_dump_state(
1942 &tg_pt_gp->tg_pt_gp_alua_access_state)), 1902 tg_pt_gp->tg_pt_gp_alua_access_state),
1943 core_alua_dump_status( 1903 core_alua_dump_status(
1944 tg_pt_gp->tg_pt_gp_alua_access_status), 1904 tg_pt_gp->tg_pt_gp_alua_access_status),
1945 atomic_read(&lun->lun_tg_pt_secondary_offline) ? 1905 atomic_read(&lun->lun_tg_pt_secondary_offline) ?
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index 38b5025e4c7a..70657fd56440 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -2392,7 +2392,7 @@ static ssize_t target_tg_pt_gp_alua_access_state_show(struct config_item *item,
 		char *page)
 {
 	return sprintf(page, "%d\n",
-		atomic_read(&to_tg_pt_gp(item)->tg_pt_gp_alua_access_state));
+		to_tg_pt_gp(item)->tg_pt_gp_alua_access_state);
 }
 
 static ssize_t target_tg_pt_gp_alua_access_state_store(struct config_item *item,
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
index d8a16ca6baa5..d1e6cab8e3d3 100644
--- a/drivers/target/target_core_fabric_configfs.c
+++ b/drivers/target/target_core_fabric_configfs.c
@@ -92,6 +92,11 @@ static int target_fabric_mappedlun_link(
92 pr_err("Source se_lun->lun_se_dev does not exist\n"); 92 pr_err("Source se_lun->lun_se_dev does not exist\n");
93 return -EINVAL; 93 return -EINVAL;
94 } 94 }
95 if (lun->lun_shutdown) {
96 pr_err("Unable to create mappedlun symlink because"
97 " lun->lun_shutdown=true\n");
98 return -EINVAL;
99 }
95 se_tpg = lun->lun_tpg; 100 se_tpg = lun->lun_tpg;
96 101
97 nacl_ci = &lun_acl_ci->ci_parent->ci_group->cg_item; 102 nacl_ci = &lun_acl_ci->ci_parent->ci_group->cg_item;
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index 6fb191914f45..dfaef4d3b2d2 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -642,6 +642,8 @@ void core_tpg_remove_lun(
 	 */
 	struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);
 
+	lun->lun_shutdown = true;
+
 	core_clear_lun_from_tpg(lun, tpg);
 	/*
 	 * Wait for any active I/O references to percpu se_lun->lun_ref to
@@ -663,6 +665,8 @@ void core_tpg_remove_lun(
 	}
 	if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
 		hlist_del_rcu(&lun->link);
+
+	lun->lun_shutdown = false;
 	mutex_unlock(&tpg->tpg_lun_mutex);
 
 	percpu_ref_exit(&lun->lun_ref);
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index b1a3cdb29468..a0cd56ee5fe9 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -64,8 +64,9 @@ struct kmem_cache *t10_alua_lba_map_cache;
 struct kmem_cache *t10_alua_lba_map_mem_cache;
 
 static void transport_complete_task_attr(struct se_cmd *cmd);
+static int translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason);
 static void transport_handle_queue_full(struct se_cmd *cmd,
-		struct se_device *dev);
+		struct se_device *dev, int err, bool write_pending);
 static int transport_put_cmd(struct se_cmd *cmd);
 static void target_complete_ok_work(struct work_struct *work);
 
@@ -804,7 +805,8 @@ void target_qf_do_work(struct work_struct *work)
 
 		if (cmd->t_state == TRANSPORT_COMPLETE_QF_WP)
 			transport_write_pending_qf(cmd);
-		else if (cmd->t_state == TRANSPORT_COMPLETE_QF_OK)
+		else if (cmd->t_state == TRANSPORT_COMPLETE_QF_OK ||
+			 cmd->t_state == TRANSPORT_COMPLETE_QF_ERR)
 			transport_complete_qf(cmd);
 	}
 }
@@ -1719,7 +1721,7 @@ void transport_generic_request_failure(struct se_cmd *cmd,
 		}
 		trace_target_cmd_complete(cmd);
 		ret = cmd->se_tfo->queue_status(cmd);
-		if (ret == -EAGAIN || ret == -ENOMEM)
+		if (ret)
 			goto queue_full;
 		goto check_stop;
 	default:
@@ -1730,7 +1732,7 @@ void transport_generic_request_failure(struct se_cmd *cmd,
 	}
 
 	ret = transport_send_check_condition_and_sense(cmd, sense_reason, 0);
-	if (ret == -EAGAIN || ret == -ENOMEM)
+	if (ret)
 		goto queue_full;
 
 check_stop:
@@ -1739,8 +1741,7 @@ check_stop:
 	return;
 
 queue_full:
-	cmd->t_state = TRANSPORT_COMPLETE_QF_OK;
-	transport_handle_queue_full(cmd, cmd->se_dev);
+	transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
 }
 EXPORT_SYMBOL(transport_generic_request_failure);
 
@@ -1977,13 +1978,29 @@ static void transport_complete_qf(struct se_cmd *cmd)
 	int ret = 0;
 
 	transport_complete_task_attr(cmd);
+	/*
+	 * If a fabric driver ->write_pending() or ->queue_data_in() callback
+	 * has returned neither -ENOMEM or -EAGAIN, assume it's fatal and
+	 * the same callbacks should not be retried. Return CHECK_CONDITION
+	 * if a scsi_status is not already set.
+	 *
+	 * If a fabric driver ->queue_status() has returned non zero, always
+	 * keep retrying no matter what..
+	 */
+	if (cmd->t_state == TRANSPORT_COMPLETE_QF_ERR) {
+		if (cmd->scsi_status)
+			goto queue_status;
 
-	if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
-		trace_target_cmd_complete(cmd);
-		ret = cmd->se_tfo->queue_status(cmd);
-		goto out;
+		cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE;
+		cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
+		cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER;
+		translate_sense_reason(cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE);
+		goto queue_status;
 	}
 
+	if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE)
+		goto queue_status;
+
 	switch (cmd->data_direction) {
 	case DMA_FROM_DEVICE:
 		if (cmd->scsi_status)
@@ -2007,19 +2024,33 @@ queue_status:
 		break;
 	}
 
-out:
 	if (ret < 0) {
-		transport_handle_queue_full(cmd, cmd->se_dev);
+		transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
 		return;
 	}
 	transport_lun_remove_cmd(cmd);
 	transport_cmd_check_stop_to_fabric(cmd);
 }
 
-static void transport_handle_queue_full(
-	struct se_cmd *cmd,
-	struct se_device *dev)
+static void transport_handle_queue_full(struct se_cmd *cmd, struct se_device *dev,
+		int err, bool write_pending)
 {
+	/*
+	 * -EAGAIN or -ENOMEM signals retry of ->write_pending() and/or
+	 * ->queue_data_in() callbacks from new process context.
+	 *
+	 * Otherwise for other errors, transport_complete_qf() will send
+	 * CHECK_CONDITION via ->queue_status() instead of attempting to
+	 * retry associated fabric driver data-transfer callbacks.
+	 */
+	if (err == -EAGAIN || err == -ENOMEM) {
+		cmd->t_state = (write_pending) ? TRANSPORT_COMPLETE_QF_WP :
+						 TRANSPORT_COMPLETE_QF_OK;
+	} else {
+		pr_warn_ratelimited("Got unknown fabric queue status: %d\n", err);
+		cmd->t_state = TRANSPORT_COMPLETE_QF_ERR;
+	}
+
 	spin_lock_irq(&dev->qf_cmd_lock);
 	list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list);
 	atomic_inc_mb(&dev->dev_qf_count);
@@ -2083,7 +2114,7 @@ static void target_complete_ok_work(struct work_struct *work)
 			WARN_ON(!cmd->scsi_status);
 			ret = transport_send_check_condition_and_sense(
 					cmd, 0, 1);
-			if (ret == -EAGAIN || ret == -ENOMEM)
+			if (ret)
 				goto queue_full;
 
 			transport_lun_remove_cmd(cmd);
@@ -2109,7 +2140,7 @@ static void target_complete_ok_work(struct work_struct *work)
 		} else if (rc) {
 			ret = transport_send_check_condition_and_sense(cmd,
 					rc, 0);
-			if (ret == -EAGAIN || ret == -ENOMEM)
+			if (ret)
 				goto queue_full;
 
 			transport_lun_remove_cmd(cmd);
@@ -2134,7 +2165,7 @@ queue_rsp:
 		if (target_read_prot_action(cmd)) {
 			ret = transport_send_check_condition_and_sense(cmd,
 					cmd->pi_err, 0);
-			if (ret == -EAGAIN || ret == -ENOMEM)
+			if (ret)
 				goto queue_full;
 
 			transport_lun_remove_cmd(cmd);
@@ -2144,7 +2175,7 @@ queue_rsp:
 
 		trace_target_cmd_complete(cmd);
 		ret = cmd->se_tfo->queue_data_in(cmd);
-		if (ret == -EAGAIN || ret == -ENOMEM)
+		if (ret)
 			goto queue_full;
 		break;
 	case DMA_TO_DEVICE:
@@ -2157,7 +2188,7 @@ queue_rsp:
 			atomic_long_add(cmd->data_length,
 				&cmd->se_lun->lun_stats.tx_data_octets);
 			ret = cmd->se_tfo->queue_data_in(cmd);
-			if (ret == -EAGAIN || ret == -ENOMEM)
+			if (ret)
 				goto queue_full;
 			break;
 		}
@@ -2166,7 +2197,7 @@ queue_rsp:
 queue_status:
 		trace_target_cmd_complete(cmd);
 		ret = cmd->se_tfo->queue_status(cmd);
-		if (ret == -EAGAIN || ret == -ENOMEM)
+		if (ret)
 			goto queue_full;
 		break;
 	default:
@@ -2180,8 +2211,8 @@ queue_status:
 queue_full:
 	pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p,"
 		" data_direction: %d\n", cmd, cmd->data_direction);
-	cmd->t_state = TRANSPORT_COMPLETE_QF_OK;
-	transport_handle_queue_full(cmd, cmd->se_dev);
+
+	transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
 }
 
 void target_free_sgl(struct scatterlist *sgl, int nents)
@@ -2449,18 +2480,14 @@ transport_generic_new_cmd(struct se_cmd *cmd)
 	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 
 	ret = cmd->se_tfo->write_pending(cmd);
-	if (ret == -EAGAIN || ret == -ENOMEM)
+	if (ret)
 		goto queue_full;
 
-	/* fabric drivers should only return -EAGAIN or -ENOMEM as error */
-	WARN_ON(ret);
-
-	return (!ret) ? 0 : TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+	return 0;
 
 queue_full:
 	pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd);
-	cmd->t_state = TRANSPORT_COMPLETE_QF_WP;
-	transport_handle_queue_full(cmd, cmd->se_dev);
+	transport_handle_queue_full(cmd, cmd->se_dev, ret, true);
 	return 0;
 }
 EXPORT_SYMBOL(transport_generic_new_cmd);
@@ -2470,10 +2497,10 @@ static void transport_write_pending_qf(struct se_cmd *cmd)
 	int ret;
 
 	ret = cmd->se_tfo->write_pending(cmd);
-	if (ret == -EAGAIN || ret == -ENOMEM) {
+	if (ret) {
 		pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n",
 			 cmd);
-		transport_handle_queue_full(cmd, cmd->se_dev);
+		transport_handle_queue_full(cmd, cmd->se_dev, ret, true);
 	}
 }
 
@@ -3011,6 +3038,8 @@ static int __transport_check_aborted_status(struct se_cmd *cmd, int send_status)
 	__releases(&cmd->t_state_lock)
 	__acquires(&cmd->t_state_lock)
 {
+	int ret;
+
 	assert_spin_locked(&cmd->t_state_lock);
 	WARN_ON_ONCE(!irqs_disabled());
 
@@ -3034,7 +3063,9 @@ static int __transport_check_aborted_status(struct se_cmd *cmd, int send_status)
 	trace_target_cmd_complete(cmd);
 
 	spin_unlock_irq(&cmd->t_state_lock);
-	cmd->se_tfo->queue_status(cmd);
+	ret = cmd->se_tfo->queue_status(cmd);
+	if (ret)
+		transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
 	spin_lock_irq(&cmd->t_state_lock);
 
 	return 1;
@@ -3055,6 +3086,7 @@ EXPORT_SYMBOL(transport_check_aborted_status);
 void transport_send_task_abort(struct se_cmd *cmd)
 {
 	unsigned long flags;
+	int ret;
 
 	spin_lock_irqsave(&cmd->t_state_lock, flags);
 	if (cmd->se_cmd_flags & (SCF_SENT_CHECK_CONDITION)) {
@@ -3090,7 +3122,9 @@ send_abort:
 		cmd->t_task_cdb[0], cmd->tag);
 
 	trace_target_cmd_complete(cmd);
-	cmd->se_tfo->queue_status(cmd);
+	ret = cmd->se_tfo->queue_status(cmd);
+	if (ret)
+		transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
 }
 
 static void target_tmr_work(struct work_struct *work)
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index c6874c38a10b..f615c3bbb73e 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -311,24 +311,50 @@ static void free_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd)
 			   DATA_BLOCK_BITS);
 }
 
-static void gather_data_area(struct tcmu_dev *udev, unsigned long *cmd_bitmap,
-	struct scatterlist *data_sg, unsigned int data_nents)
+static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
+			     bool bidi)
 {
+	struct se_cmd *se_cmd = cmd->se_cmd;
 	int i, block;
 	int block_remaining = 0;
 	void *from, *to;
 	size_t copy_bytes, from_offset;
-	struct scatterlist *sg;
+	struct scatterlist *sg, *data_sg;
+	unsigned int data_nents;
+	DECLARE_BITMAP(bitmap, DATA_BLOCK_BITS);
+
+	bitmap_copy(bitmap, cmd->data_bitmap, DATA_BLOCK_BITS);
+
+	if (!bidi) {
+		data_sg = se_cmd->t_data_sg;
+		data_nents = se_cmd->t_data_nents;
+	} else {
+		uint32_t count;
+
+		/*
+		 * For bidi case, the first count blocks are for Data-Out
+		 * buffer blocks, and before gathering the Data-In buffer
+		 * the Data-Out buffer blocks should be discarded.
+		 */
+		count = DIV_ROUND_UP(se_cmd->data_length, DATA_BLOCK_SIZE);
+		while (count--) {
+			block = find_first_bit(bitmap, DATA_BLOCK_BITS);
+			clear_bit(block, bitmap);
+		}
+
+		data_sg = se_cmd->t_bidi_data_sg;
+		data_nents = se_cmd->t_bidi_data_nents;
+	}
 
 	for_each_sg(data_sg, sg, data_nents, i) {
 		int sg_remaining = sg->length;
 		to = kmap_atomic(sg_page(sg)) + sg->offset;
 		while (sg_remaining > 0) {
 			if (block_remaining == 0) {
-				block = find_first_bit(cmd_bitmap,
+				block = find_first_bit(bitmap,
 					DATA_BLOCK_BITS);
 				block_remaining = DATA_BLOCK_SIZE;
-				clear_bit(block, cmd_bitmap);
+				clear_bit(block, bitmap);
 			}
 			copy_bytes = min_t(size_t, sg_remaining,
 					block_remaining);
@@ -394,6 +420,27 @@ static bool is_ring_space_avail(struct tcmu_dev *udev, size_t cmd_size, size_t d
 	return true;
 }
 
+static inline size_t tcmu_cmd_get_data_length(struct tcmu_cmd *tcmu_cmd)
+{
+	struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
+	size_t data_length = round_up(se_cmd->data_length, DATA_BLOCK_SIZE);
+
+	if (se_cmd->se_cmd_flags & SCF_BIDI) {
+		BUG_ON(!(se_cmd->t_bidi_data_sg && se_cmd->t_bidi_data_nents));
+		data_length += round_up(se_cmd->t_bidi_data_sg->length,
+				DATA_BLOCK_SIZE);
+	}
+
+	return data_length;
+}
+
+static inline uint32_t tcmu_cmd_get_block_cnt(struct tcmu_cmd *tcmu_cmd)
+{
+	size_t data_length = tcmu_cmd_get_data_length(tcmu_cmd);
+
+	return data_length / DATA_BLOCK_SIZE;
+}
+
 static sense_reason_t
 tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
 {
@@ -407,7 +454,7 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
 	uint32_t cmd_head;
 	uint64_t cdb_off;
 	bool copy_to_data_area;
-	size_t data_length;
+	size_t data_length = tcmu_cmd_get_data_length(tcmu_cmd);
 	DECLARE_BITMAP(old_bitmap, DATA_BLOCK_BITS);
 
 	if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags))
@@ -421,8 +468,7 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
 	 * expensive to tell how many regions are freed in the bitmap
 	 */
 	base_command_size = max(offsetof(struct tcmu_cmd_entry,
-				req.iov[se_cmd->t_bidi_data_nents +
-					se_cmd->t_data_nents]),
+				req.iov[tcmu_cmd_get_block_cnt(tcmu_cmd)]),
 				sizeof(struct tcmu_cmd_entry));
 	command_size = base_command_size
 		+ round_up(scsi_command_size(se_cmd->t_task_cdb), TCMU_OP_ALIGN_SIZE);
@@ -433,11 +479,6 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
 
 	mb = udev->mb_addr;
 	cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
-	data_length = se_cmd->data_length;
-	if (se_cmd->se_cmd_flags & SCF_BIDI) {
-		BUG_ON(!(se_cmd->t_bidi_data_sg && se_cmd->t_bidi_data_nents));
-		data_length += se_cmd->t_bidi_data_sg->length;
-	}
 	if ((command_size > (udev->cmdr_size / 2)) ||
 	    data_length > udev->data_size) {
 		pr_warn("TCMU: Request of size %zu/%zu is too big for %u/%zu "
@@ -511,11 +552,14 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
 	entry->req.iov_dif_cnt = 0;
 
 	/* Handle BIDI commands */
-	iov_cnt = 0;
-	alloc_and_scatter_data_area(udev, se_cmd->t_bidi_data_sg,
-		se_cmd->t_bidi_data_nents, &iov, &iov_cnt, false);
-	entry->req.iov_bidi_cnt = iov_cnt;
-
+	if (se_cmd->se_cmd_flags & SCF_BIDI) {
+		iov_cnt = 0;
+		iov++;
+		alloc_and_scatter_data_area(udev, se_cmd->t_bidi_data_sg,
+				se_cmd->t_bidi_data_nents, &iov, &iov_cnt,
+				false);
+		entry->req.iov_bidi_cnt = iov_cnt;
+	}
 	/* cmd's data_bitmap is what changed in process */
 	bitmap_xor(tcmu_cmd->data_bitmap, old_bitmap, udev->data_bitmap,
 			DATA_BLOCK_BITS);
@@ -592,19 +636,11 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
 			se_cmd->scsi_sense_length);
 		free_data_area(udev, cmd);
 	} else if (se_cmd->se_cmd_flags & SCF_BIDI) {
-		DECLARE_BITMAP(bitmap, DATA_BLOCK_BITS);
-
 		/* Get Data-In buffer before clean up */
-		bitmap_copy(bitmap, cmd->data_bitmap, DATA_BLOCK_BITS);
-		gather_data_area(udev, bitmap,
-			se_cmd->t_bidi_data_sg, se_cmd->t_bidi_data_nents);
+		gather_data_area(udev, cmd, true);
 		free_data_area(udev, cmd);
 	} else if (se_cmd->data_direction == DMA_FROM_DEVICE) {
-		DECLARE_BITMAP(bitmap, DATA_BLOCK_BITS);
-
-		bitmap_copy(bitmap, cmd->data_bitmap, DATA_BLOCK_BITS);
-		gather_data_area(udev, bitmap,
-			se_cmd->t_data_sg, se_cmd->t_data_nents);
+		gather_data_area(udev, cmd, false);
 		free_data_area(udev, cmd);
 	} else if (se_cmd->data_direction == DMA_TO_DEVICE) {
 		free_data_area(udev, cmd);
@@ -1196,11 +1232,6 @@ static ssize_t tcmu_cmd_time_out_store(struct config_item *item, const char *pag
1196 if (ret < 0) 1232 if (ret < 0)
1197 return ret; 1233 return ret;
1198 1234
1199 if (!val) {
1200 pr_err("Illegal value for cmd_time_out\n");
1201 return -EINVAL;
1202 }
1203
1204 udev->cmd_time_out = val * MSEC_PER_SEC; 1235 udev->cmd_time_out = val * MSEC_PER_SEC;
1205 return count; 1236 return count;
1206} 1237}
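
The sizing arithmetic in the tcmu hunk above (reserve iovec slots with offsetof(), never shrink below the fixed entry, then align the CDB) is easy to get wrong. A minimal standalone sketch of the same calculation, with invented struct and macro names; the driver's real layout differs:

    #include <stddef.h>

    #define OP_ALIGN_SIZE 16        /* assumed ring alignment, cf. TCMU_OP_ALIGN_SIZE */

    struct data_iov { void *base; size_t len; };

    struct cmd_entry {
            unsigned int hdr;
            struct data_iov iov[];  /* flexible array of data descriptors */
    };

    /* Room for iov_cnt descriptors plus an aligned CDB of cdb_len bytes.
     * offsetof() with a variable index is the kernel idiom (a GCC extension). */
    static size_t cmd_entry_size(size_t iov_cnt, size_t cdb_len)
    {
            size_t base = offsetof(struct cmd_entry, iov[iov_cnt]);

            /* Never allocate less than the fixed part of the entry. */
            if (base < sizeof(struct cmd_entry))
                    base = sizeof(struct cmd_entry);

            /* Round the CDB up to the ring's operation alignment. */
            return base + ((cdb_len + OP_ALIGN_SIZE - 1) & ~(OP_ALIGN_SIZE - 1));
    }
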
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
index 91048eeca28b..69d0f430b2d1 100644
--- a/drivers/thermal/cpu_cooling.c
+++ b/drivers/thermal/cpu_cooling.c
@@ -107,8 +107,6 @@ struct cpufreq_cooling_device {
107}; 107};
108static DEFINE_IDA(cpufreq_ida); 108static DEFINE_IDA(cpufreq_ida);
109 109
110static unsigned int cpufreq_dev_count;
111
112static DEFINE_MUTEX(cooling_list_lock); 110static DEFINE_MUTEX(cooling_list_lock);
113static LIST_HEAD(cpufreq_dev_list); 111static LIST_HEAD(cpufreq_dev_list);
114 112
@@ -395,13 +393,20 @@ static int get_static_power(struct cpufreq_cooling_device *cpufreq_device,
395 393
396 opp = dev_pm_opp_find_freq_exact(cpufreq_device->cpu_dev, freq_hz, 394 opp = dev_pm_opp_find_freq_exact(cpufreq_device->cpu_dev, freq_hz,
397 true); 395 true);
396 if (IS_ERR(opp)) {
397 dev_warn_ratelimited(cpufreq_device->cpu_dev,
398 "Failed to find OPP for frequency %lu: %ld\n",
399 freq_hz, PTR_ERR(opp));
400 return -EINVAL;
401 }
402
398 voltage = dev_pm_opp_get_voltage(opp); 403 voltage = dev_pm_opp_get_voltage(opp);
399 dev_pm_opp_put(opp); 404 dev_pm_opp_put(opp);
400 405
401 if (voltage == 0) { 406 if (voltage == 0) {
402 dev_warn_ratelimited(cpufreq_device->cpu_dev, 407 dev_err_ratelimited(cpufreq_device->cpu_dev,
403 "Failed to get voltage for frequency %lu: %ld\n", 408 "Failed to get voltage for frequency %lu\n",
404 freq_hz, IS_ERR(opp) ? PTR_ERR(opp) : 0); 409 freq_hz);
405 return -EINVAL; 410 return -EINVAL;
406 } 411 }
407 412
@@ -693,9 +698,9 @@ static int cpufreq_power2state(struct thermal_cooling_device *cdev,
693 698
694 *state = cpufreq_cooling_get_level(cpu, target_freq); 699 *state = cpufreq_cooling_get_level(cpu, target_freq);
695 if (*state == THERMAL_CSTATE_INVALID) { 700 if (*state == THERMAL_CSTATE_INVALID) {
696 dev_warn_ratelimited(&cdev->device, 701 dev_err_ratelimited(&cdev->device,
697 "Failed to convert %dKHz for cpu %d into a cdev state\n", 702 "Failed to convert %dKHz for cpu %d into a cdev state\n",
698 target_freq, cpu); 703 target_freq, cpu);
699 return -EINVAL; 704 return -EINVAL;
700 } 705 }
701 706
@@ -771,6 +776,7 @@ __cpufreq_cooling_register(struct device_node *np,
771 unsigned int freq, i, num_cpus; 776 unsigned int freq, i, num_cpus;
772 int ret; 777 int ret;
773 struct thermal_cooling_device_ops *cooling_ops; 778 struct thermal_cooling_device_ops *cooling_ops;
779 bool first;
774 780
775 if (!alloc_cpumask_var(&temp_mask, GFP_KERNEL)) 781 if (!alloc_cpumask_var(&temp_mask, GFP_KERNEL))
776 return ERR_PTR(-ENOMEM); 782 return ERR_PTR(-ENOMEM);
@@ -874,13 +880,14 @@ __cpufreq_cooling_register(struct device_node *np,
874 cpufreq_dev->cool_dev = cool_dev; 880 cpufreq_dev->cool_dev = cool_dev;
875 881
876 mutex_lock(&cooling_list_lock); 882 mutex_lock(&cooling_list_lock);
 883 /* Register the notifier for the first cpufreq cooling device */
884 first = list_empty(&cpufreq_dev_list);
877 list_add(&cpufreq_dev->node, &cpufreq_dev_list); 885 list_add(&cpufreq_dev->node, &cpufreq_dev_list);
886 mutex_unlock(&cooling_list_lock);
878 887
879 /* Register the notifier for first cpufreq cooling device */ 888 if (first)
880 if (!cpufreq_dev_count++)
881 cpufreq_register_notifier(&thermal_cpufreq_notifier_block, 889 cpufreq_register_notifier(&thermal_cpufreq_notifier_block,
882 CPUFREQ_POLICY_NOTIFIER); 890 CPUFREQ_POLICY_NOTIFIER);
883 mutex_unlock(&cooling_list_lock);
884 891
885 goto put_policy; 892 goto put_policy;
886 893
@@ -1021,6 +1028,7 @@ EXPORT_SYMBOL(of_cpufreq_power_cooling_register);
1021void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev) 1028void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
1022{ 1029{
1023 struct cpufreq_cooling_device *cpufreq_dev; 1030 struct cpufreq_cooling_device *cpufreq_dev;
1031 bool last;
1024 1032
1025 if (!cdev) 1033 if (!cdev)
1026 return; 1034 return;
@@ -1028,14 +1036,15 @@ void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
1028 cpufreq_dev = cdev->devdata; 1036 cpufreq_dev = cdev->devdata;
1029 1037
1030 mutex_lock(&cooling_list_lock); 1038 mutex_lock(&cooling_list_lock);
1039 list_del(&cpufreq_dev->node);
1031 /* Unregister the notifier for the last cpufreq cooling device */ 1040 /* Unregister the notifier for the last cpufreq cooling device */
1032 if (!--cpufreq_dev_count) 1041 last = list_empty(&cpufreq_dev_list);
1042 mutex_unlock(&cooling_list_lock);
1043
1044 if (last)
1033 cpufreq_unregister_notifier(&thermal_cpufreq_notifier_block, 1045 cpufreq_unregister_notifier(&thermal_cpufreq_notifier_block,
1034 CPUFREQ_POLICY_NOTIFIER); 1046 CPUFREQ_POLICY_NOTIFIER);
1035 1047
1036 list_del(&cpufreq_dev->node);
1037 mutex_unlock(&cooling_list_lock);
1038
1039 thermal_cooling_device_unregister(cpufreq_dev->cool_dev); 1048 thermal_cooling_device_unregister(cpufreq_dev->cool_dev);
1040 ida_simple_remove(&cpufreq_ida, cpufreq_dev->id); 1049 ida_simple_remove(&cpufreq_ida, cpufreq_dev->id);
1041 kfree(cpufreq_dev->dyn_power_table); 1050 kfree(cpufreq_dev->dyn_power_table);
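
The locking change above is a standard pattern: whether a device is the first to arrive or the last to leave is decided from list_empty() while the list mutex is held, and the (possibly sleeping) notifier call is made after dropping it. A reduced sketch with hypothetical register/unregister helpers:

    static LIST_HEAD(dev_list);
    static DEFINE_MUTEX(list_lock);

    static void cooling_add(struct list_head *node)
    {
            bool first;

            mutex_lock(&list_lock);
            first = list_empty(&dev_list);  /* decide under the lock */
            list_add(node, &dev_list);
            mutex_unlock(&list_lock);

            if (first)                      /* notify outside the lock */
                    register_notifier();    /* assumed helper */
    }

    static void cooling_del(struct list_head *node)
    {
            bool last;

            mutex_lock(&list_lock);
            list_del(node);
            last = list_empty(&dev_list);
            mutex_unlock(&list_lock);

            if (last)
                    unregister_notifier();  /* assumed helper */
    }
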
diff --git a/drivers/thermal/devfreq_cooling.c b/drivers/thermal/devfreq_cooling.c
index 7743a78d4723..4bf4ad58cffd 100644
--- a/drivers/thermal/devfreq_cooling.c
+++ b/drivers/thermal/devfreq_cooling.c
@@ -186,16 +186,22 @@ get_static_power(struct devfreq_cooling_device *dfc, unsigned long freq)
186 return 0; 186 return 0;
187 187
188 opp = dev_pm_opp_find_freq_exact(dev, freq, true); 188 opp = dev_pm_opp_find_freq_exact(dev, freq, true);
189 if (IS_ERR(opp) && (PTR_ERR(opp) == -ERANGE)) 189 if (PTR_ERR(opp) == -ERANGE)
190 opp = dev_pm_opp_find_freq_exact(dev, freq, false); 190 opp = dev_pm_opp_find_freq_exact(dev, freq, false);
191 191
192 if (IS_ERR(opp)) {
193 dev_err_ratelimited(dev, "Failed to find OPP for frequency %lu: %ld\n",
194 freq, PTR_ERR(opp));
195 return 0;
196 }
197
192 voltage = dev_pm_opp_get_voltage(opp) / 1000; /* mV */ 198 voltage = dev_pm_opp_get_voltage(opp) / 1000; /* mV */
193 dev_pm_opp_put(opp); 199 dev_pm_opp_put(opp);
194 200
195 if (voltage == 0) { 201 if (voltage == 0) {
196 dev_warn_ratelimited(dev, 202 dev_err_ratelimited(dev,
197 "Failed to get voltage for frequency %lu: %ld\n", 203 "Failed to get voltage for frequency %lu\n",
198 freq, IS_ERR(opp) ? PTR_ERR(opp) : 0); 204 freq);
199 return 0; 205 return 0;
200 } 206 }
201 207
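
Both cooling drivers gain the same fix: the OPP returned by dev_pm_opp_find_freq_exact() is an ERR_PTR on failure and must be checked before any accessor touches it. The rule, isolated (the helper name is invented):

    static int opp_voltage_mv(struct device *dev, unsigned long freq_hz)
    {
            struct dev_pm_opp *opp;
            unsigned long uv;

            opp = dev_pm_opp_find_freq_exact(dev, freq_hz, true);
            if (IS_ERR(opp))
                    return PTR_ERR(opp);    /* never dereference an ERR_PTR */

            uv = dev_pm_opp_get_voltage(opp);
            dev_pm_opp_put(opp);            /* the lookup took a reference */

            return uv ? (int)(uv / 1000) : -EINVAL;
    }
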
diff --git a/drivers/tty/serial/8250/Kconfig b/drivers/tty/serial/8250/Kconfig
index a65fb8197aec..0e3f529d50e9 100644
--- a/drivers/tty/serial/8250/Kconfig
+++ b/drivers/tty/serial/8250/Kconfig
@@ -128,9 +128,13 @@ config SERIAL_8250_PCI
128 by the parport_serial driver, enabled with CONFIG_PARPORT_SERIAL. 128 by the parport_serial driver, enabled with CONFIG_PARPORT_SERIAL.
129 129
130config SERIAL_8250_EXAR 130config SERIAL_8250_EXAR
131 tristate "8250/16550 PCI device support" 131 tristate "8250/16550 Exar/Commtech PCI/PCIe device support"
132 depends on SERIAL_8250_PCI 132 depends on SERIAL_8250_PCI
133 default SERIAL_8250 133 default SERIAL_8250
134 help
135 This builds support for XR17C1xx, XR17V3xx and some Commtech
136 422x PCIe serial cards that are not covered by the more generic
137 SERIAL_8250_PCI option.
134 138
135config SERIAL_8250_HP300 139config SERIAL_8250_HP300
136 tristate 140 tristate
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index 56f92d7348bf..b0a377725d63 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -2452,18 +2452,37 @@ static void pl011_early_write(struct console *con, const char *s, unsigned n)
2452 uart_console_write(&dev->port, s, n, pl011_putc); 2452 uart_console_write(&dev->port, s, n, pl011_putc);
2453} 2453}
2454 2454
2455/*
2456 * On non-ACPI systems, earlycon is enabled by specifying
2457 * "earlycon=pl011,<address>" on the kernel command line.
2458 *
2459 * On ACPI ARM64 systems, an "early" console is enabled via the SPCR table,
2460 * by specifying only "earlycon" on the command line. Because it requires
2461 * SPCR, the console starts after ACPI is parsed, which is later than a
2462 * traditional early console.
2463 *
2464 * To get the traditional early console that starts before ACPI is parsed,
2465 * specify the full "earlycon=pl011,<address>" option.
2466 */
2455static int __init pl011_early_console_setup(struct earlycon_device *device, 2467static int __init pl011_early_console_setup(struct earlycon_device *device,
2456 const char *opt) 2468 const char *opt)
2457{ 2469{
2458 if (!device->port.membase) 2470 if (!device->port.membase)
2459 return -ENODEV; 2471 return -ENODEV;
2460 2472
2461 device->con->write = qdf2400_e44_present ? 2473 /* On QDF2400 SOCs affected by Erratum 44, the "qdf2400_e44" must
2462 qdf2400_e44_early_write : pl011_early_write; 2474 * also be specified, e.g. "earlycon=pl011,<address>,qdf2400_e44".
2475 */
2476 if (!strcmp(device->options, "qdf2400_e44"))
2477 device->con->write = qdf2400_e44_early_write;
2478 else
2479 device->con->write = pl011_early_write;
2480
2463 return 0; 2481 return 0;
2464} 2482}
2465OF_EARLYCON_DECLARE(pl011, "arm,pl011", pl011_early_console_setup); 2483OF_EARLYCON_DECLARE(pl011, "arm,pl011", pl011_early_console_setup);
2466OF_EARLYCON_DECLARE(pl011, "arm,sbsa-uart", pl011_early_console_setup); 2484OF_EARLYCON_DECLARE(pl011, "arm,sbsa-uart", pl011_early_console_setup);
2485EARLYCON_DECLARE(qdf2400_e44, pl011_early_console_setup);
2467 2486
2468#else 2487#else
2469#define AMBA_CONSOLE NULL 2488#define AMBA_CONSOLE NULL
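
Putting the comment above into concrete terms, the three spellings look like this on the kernel command line (the MMIO address is a placeholder):

    earlycon                                    # ACPI: console comes up only after SPCR is parsed
    earlycon=pl011,0x87e024000000               # traditional early console at a fixed address
    earlycon=pl011,0x87e024000000,qdf2400_e44   # same, using the QDF2400 Erratum 44 write path
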
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index dcebb28ffbc4..1f50a83ef958 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -1951,6 +1951,11 @@ static void atmel_flush_buffer(struct uart_port *port)
1951 atmel_uart_writel(port, ATMEL_PDC_TCR, 0); 1951 atmel_uart_writel(port, ATMEL_PDC_TCR, 0);
1952 atmel_port->pdc_tx.ofs = 0; 1952 atmel_port->pdc_tx.ofs = 0;
1953 } 1953 }
1954 /*
1955 * in uart_flush_buffer(), the xmit circular buffer has just
1956 * been cleared, so we have to reset tx_len accordingly.
1957 */
1958 atmel_port->tx_len = 0;
1954} 1959}
1955 1960
1956/* 1961/*
@@ -2483,6 +2488,9 @@ static void atmel_console_write(struct console *co, const char *s, u_int count)
2483 pdc_tx = atmel_uart_readl(port, ATMEL_PDC_PTSR) & ATMEL_PDC_TXTEN; 2488 pdc_tx = atmel_uart_readl(port, ATMEL_PDC_PTSR) & ATMEL_PDC_TXTEN;
2484 atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS); 2489 atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS);
2485 2490
 2491 /* Make sure the tx path is actually able to send characters */
2492 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN);
2493
2486 uart_console_write(port, s, count, atmel_console_putchar); 2494 uart_console_write(port, s, count, atmel_console_putchar);
2487 2495
2488 /* 2496 /*
diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c
index 6989b227d134..be94246b6fcc 100644
--- a/drivers/tty/serial/mxs-auart.c
+++ b/drivers/tty/serial/mxs-auart.c
@@ -1088,7 +1088,7 @@ static void mxs_auart_settermios(struct uart_port *u,
1088 AUART_LINECTRL_BAUD_DIV_MAX); 1088 AUART_LINECTRL_BAUD_DIV_MAX);
1089 baud_max = u->uartclk * 32 / AUART_LINECTRL_BAUD_DIV_MIN; 1089 baud_max = u->uartclk * 32 / AUART_LINECTRL_BAUD_DIV_MIN;
1090 baud = uart_get_baud_rate(u, termios, old, baud_min, baud_max); 1090 baud = uart_get_baud_rate(u, termios, old, baud_min, baud_max);
1091 div = u->uartclk * 32 / baud; 1091 div = DIV_ROUND_CLOSEST(u->uartclk * 32, baud);
1092 } 1092 }
1093 1093
1094 ctrl |= AUART_LINECTRL_BAUD_DIVFRAC(div & 0x3F); 1094 ctrl |= AUART_LINECTRL_BAUD_DIVFRAC(div & 0x3F);
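
The mxs-auart one-liner matters because truncating division always biases the programmed divisor low and therefore the resulting baud rate high. A quick standalone check (clock and rate are illustrative; the macro is the simplified unsigned form of the kernel's):

    #include <stdio.h>

    #define DIV_ROUND_CLOSEST(x, divisor) (((x) + (divisor) / 2) / (divisor))

    int main(void)
    {
            unsigned int uartclk = 24000000, baud = 115200;

            /* 24000000 * 32 / 115200 = 6666.67: truncation programs 6666,
             * round-to-nearest programs 6667, halving the rate error. */
            printf("trunc=%u closest=%u\n",
                   uartclk * 32 / baud,
                   DIV_ROUND_CLOSEST(uartclk * 32, baud));
            return 0;
    }
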
diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
index b0500a0a87b8..e4603b09863a 100644
--- a/drivers/tty/tty_ldisc.c
+++ b/drivers/tty/tty_ldisc.c
@@ -492,6 +492,41 @@ static void tty_ldisc_close(struct tty_struct *tty, struct tty_ldisc *ld)
492} 492}
493 493
494/** 494/**
495 * tty_ldisc_restore - helper for tty ldisc change
496 * @tty: tty to recover
497 * @old: previous ldisc
498 *
499 * Restore the previous line discipline or N_TTY when a line discipline
500 * change fails due to an open error
501 */
502
503static void tty_ldisc_restore(struct tty_struct *tty, struct tty_ldisc *old)
504{
505 struct tty_ldisc *new_ldisc;
506 int r;
507
508 /* There is an outstanding reference here so this is safe */
509 old = tty_ldisc_get(tty, old->ops->num);
510 WARN_ON(IS_ERR(old));
511 tty->ldisc = old;
512 tty_set_termios_ldisc(tty, old->ops->num);
513 if (tty_ldisc_open(tty, old) < 0) {
514 tty_ldisc_put(old);
515 /* This driver is always present */
516 new_ldisc = tty_ldisc_get(tty, N_TTY);
517 if (IS_ERR(new_ldisc))
518 panic("n_tty: get");
519 tty->ldisc = new_ldisc;
520 tty_set_termios_ldisc(tty, N_TTY);
521 r = tty_ldisc_open(tty, new_ldisc);
522 if (r < 0)
523 panic("Couldn't open N_TTY ldisc for "
524 "%s --- error %d.",
525 tty_name(tty), r);
526 }
527}
528
529/**
495 * tty_set_ldisc - set line discipline 530 * tty_set_ldisc - set line discipline
496 * @tty: the terminal to set 531 * @tty: the terminal to set
497 * @ldisc: the line discipline 532 * @ldisc: the line discipline
@@ -504,7 +539,12 @@ static void tty_ldisc_close(struct tty_struct *tty, struct tty_ldisc *ld)
504 539
505int tty_set_ldisc(struct tty_struct *tty, int disc) 540int tty_set_ldisc(struct tty_struct *tty, int disc)
506{ 541{
507 int retval, old_disc; 542 int retval;
543 struct tty_ldisc *old_ldisc, *new_ldisc;
544
545 new_ldisc = tty_ldisc_get(tty, disc);
546 if (IS_ERR(new_ldisc))
547 return PTR_ERR(new_ldisc);
508 548
509 tty_lock(tty); 549 tty_lock(tty);
510 retval = tty_ldisc_lock(tty, 5 * HZ); 550 retval = tty_ldisc_lock(tty, 5 * HZ);
@@ -517,8 +557,7 @@ int tty_set_ldisc(struct tty_struct *tty, int disc)
517 } 557 }
518 558
519 /* Check the no-op case */ 559 /* Check the no-op case */
520 old_disc = tty->ldisc->ops->num; 560 if (tty->ldisc->ops->num == disc)
521 if (old_disc == disc)
522 goto out; 561 goto out;
523 562
524 if (test_bit(TTY_HUPPED, &tty->flags)) { 563 if (test_bit(TTY_HUPPED, &tty->flags)) {
@@ -527,25 +566,34 @@ int tty_set_ldisc(struct tty_struct *tty, int disc)
527 goto out; 566 goto out;
528 } 567 }
529 568
530 retval = tty_ldisc_reinit(tty, disc); 569 old_ldisc = tty->ldisc;
570
571 /* Shutdown the old discipline. */
572 tty_ldisc_close(tty, old_ldisc);
573
574 /* Now set up the new line discipline. */
575 tty->ldisc = new_ldisc;
576 tty_set_termios_ldisc(tty, disc);
577
578 retval = tty_ldisc_open(tty, new_ldisc);
531 if (retval < 0) { 579 if (retval < 0) {
532 /* Back to the old one or N_TTY if we can't */ 580 /* Back to the old one or N_TTY if we can't */
533 if (tty_ldisc_reinit(tty, old_disc) < 0) { 581 tty_ldisc_put(new_ldisc);
534 pr_err("tty: TIOCSETD failed, reinitializing N_TTY\n"); 582 tty_ldisc_restore(tty, old_ldisc);
535 if (tty_ldisc_reinit(tty, N_TTY) < 0) {
536 /* At this point we have tty->ldisc == NULL. */
537 pr_err("tty: reinitializing N_TTY failed\n");
538 }
539 }
540 } 583 }
541 584
542 if (tty->ldisc && tty->ldisc->ops->num != old_disc && 585 if (tty->ldisc->ops->num != old_ldisc->ops->num && tty->ops->set_ldisc) {
543 tty->ops->set_ldisc) {
544 down_read(&tty->termios_rwsem); 586 down_read(&tty->termios_rwsem);
545 tty->ops->set_ldisc(tty); 587 tty->ops->set_ldisc(tty);
546 up_read(&tty->termios_rwsem); 588 up_read(&tty->termios_rwsem);
547 } 589 }
548 590
591 /* At this point we hold a reference to the new ldisc and a
592 reference to the old ldisc, or we hold two references to
593 the old ldisc (if it was restored as part of error cleanup
594 above). In either case, releasing a single reference from
595 the old ldisc is correct. */
596 new_ldisc = old_ldisc;
549out: 597out:
550 tty_ldisc_unlock(tty); 598 tty_ldisc_unlock(tty);
551 599
@@ -553,6 +601,7 @@ out:
553 already running */ 601 already running */
554 tty_buffer_restart_work(tty->port); 602 tty_buffer_restart_work(tty->port);
555err: 603err:
604 tty_ldisc_put(new_ldisc); /* drop the extra reference */
556 tty_unlock(tty); 605 tty_unlock(tty);
557 return retval; 606 return retval;
558} 607}
@@ -613,8 +662,10 @@ int tty_ldisc_reinit(struct tty_struct *tty, int disc)
613 int retval; 662 int retval;
614 663
615 ld = tty_ldisc_get(tty, disc); 664 ld = tty_ldisc_get(tty, disc);
616 if (IS_ERR(ld)) 665 if (IS_ERR(ld)) {
666 BUG_ON(disc == N_TTY);
617 return PTR_ERR(ld); 667 return PTR_ERR(ld);
668 }
618 669
619 if (tty->ldisc) { 670 if (tty->ldisc) {
620 tty_ldisc_close(tty, tty->ldisc); 671 tty_ldisc_close(tty, tty->ldisc);
@@ -626,8 +677,10 @@ int tty_ldisc_reinit(struct tty_struct *tty, int disc)
626 tty_set_termios_ldisc(tty, disc); 677 tty_set_termios_ldisc(tty, disc);
627 retval = tty_ldisc_open(tty, tty->ldisc); 678 retval = tty_ldisc_open(tty, tty->ldisc);
628 if (retval) { 679 if (retval) {
629 tty_ldisc_put(tty->ldisc); 680 if (!WARN_ON(disc == N_TTY)) {
630 tty->ldisc = NULL; 681 tty_ldisc_put(tty->ldisc);
682 tty->ldisc = NULL;
683 }
631 } 684 }
632 return retval; 685 return retval;
633} 686}
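
The reworked tty_set_ldisc() above follows one ownership rule: take the reference on the new ldisc before any teardown, and arrange the exit so that exactly one reference is dropped regardless of which path ran. Reduced to a sketch with hypothetical types and helpers:

    static int swap_ldisc(struct tty *t, int num)
    {
            struct ldisc *nl, *ol;
            int ret;

            nl = ldisc_get(num);            /* may fail: take the new ref first */
            if (IS_ERR(nl))
                    return PTR_ERR(nl);

            ol = t->ld;
            ldisc_close(t, ol);             /* shut down the old discipline */
            t->ld = nl;

            ret = ldisc_open(t, nl);
            if (ret < 0) {
                    ldisc_put(nl);          /* open failed: the new ref goes away */
                    ldisc_restore(t, ol);   /* reinstalls ol, or N_TTY if that fails */
            }

            /* Success or failure, exactly one old-side reference is surplus. */
            ldisc_put(ol);
            return ret;
    }
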
diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
index c5f0fc906136..8af8d9542663 100644
--- a/drivers/tty/vt/keyboard.c
+++ b/drivers/tty/vt/keyboard.c
@@ -28,7 +28,6 @@
28#include <linux/module.h> 28#include <linux/module.h>
29#include <linux/sched/signal.h> 29#include <linux/sched/signal.h>
30#include <linux/sched/debug.h> 30#include <linux/sched/debug.h>
31#include <linux/sched/debug.h>
32#include <linux/tty.h> 31#include <linux/tty.h>
33#include <linux/tty_flip.h> 32#include <linux/tty_flip.h>
34#include <linux/mm.h> 33#include <linux/mm.h>
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 612fab6e54fb..79bdca5cb9c7 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -520,8 +520,10 @@ static int rh_call_control (struct usb_hcd *hcd, struct urb *urb)
520 */ 520 */
521 tbuf_size = max_t(u16, sizeof(struct usb_hub_descriptor), wLength); 521 tbuf_size = max_t(u16, sizeof(struct usb_hub_descriptor), wLength);
522 tbuf = kzalloc(tbuf_size, GFP_KERNEL); 522 tbuf = kzalloc(tbuf_size, GFP_KERNEL);
523 if (!tbuf) 523 if (!tbuf) {
524 return -ENOMEM; 524 status = -ENOMEM;
525 goto err_alloc;
526 }
525 527
526 bufp = tbuf; 528 bufp = tbuf;
527 529
@@ -734,6 +736,7 @@ error:
734 } 736 }
735 737
736 kfree(tbuf); 738 kfree(tbuf);
739 err_alloc:
737 740
738 /* any errors get returned through the urb completion */ 741 /* any errors get returned through the urb completion */
739 spin_lock_irq(&hcd_root_hub_lock); 742 spin_lock_irq(&hcd_root_hub_lock);
diff --git a/drivers/usb/gadget/function/f_tcm.c b/drivers/usb/gadget/function/f_tcm.c
index d2351139342f..a82e2bd5ea34 100644
--- a/drivers/usb/gadget/function/f_tcm.c
+++ b/drivers/usb/gadget/function/f_tcm.c
@@ -373,7 +373,7 @@ static void bot_cleanup_old_alt(struct f_uas *fu)
373 usb_ep_free_request(fu->ep_in, fu->bot_req_in); 373 usb_ep_free_request(fu->ep_in, fu->bot_req_in);
374 usb_ep_free_request(fu->ep_out, fu->bot_req_out); 374 usb_ep_free_request(fu->ep_out, fu->bot_req_out);
375 usb_ep_free_request(fu->ep_out, fu->cmd.req); 375 usb_ep_free_request(fu->ep_out, fu->cmd.req);
376 usb_ep_free_request(fu->ep_out, fu->bot_status.req); 376 usb_ep_free_request(fu->ep_in, fu->bot_status.req);
377 377
378 kfree(fu->cmd.buf); 378 kfree(fu->cmd.buf);
379 379
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index bd02a6cd8e2c..6ed468fa7d5e 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -344,6 +344,7 @@ MODULE_DEVICE_TABLE(acpi, usb_xhci_acpi_match);
344static struct platform_driver usb_xhci_driver = { 344static struct platform_driver usb_xhci_driver = {
345 .probe = xhci_plat_probe, 345 .probe = xhci_plat_probe,
346 .remove = xhci_plat_remove, 346 .remove = xhci_plat_remove,
347 .shutdown = usb_hcd_platform_shutdown,
347 .driver = { 348 .driver = {
348 .name = "xhci-hcd", 349 .name = "xhci-hcd",
349 .pm = DEV_PM_OPS, 350 .pm = DEV_PM_OPS,
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index d9936c771fa0..a3309aa02993 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -1989,6 +1989,9 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
1989 case TRB_NORMAL: 1989 case TRB_NORMAL:
1990 td->urb->actual_length = requested - remaining; 1990 td->urb->actual_length = requested - remaining;
1991 goto finish_td; 1991 goto finish_td;
1992 case TRB_STATUS:
1993 td->urb->actual_length = requested;
1994 goto finish_td;
1992 default: 1995 default:
1993 xhci_warn(xhci, "WARN: unexpected TRB Type %d\n", 1996 xhci_warn(xhci, "WARN: unexpected TRB Type %d\n",
1994 trb_type); 1997 trb_type);
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 50aee8b7718b..953fd8f62df0 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -1477,6 +1477,7 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
1477 struct xhci_ring *ep_ring; 1477 struct xhci_ring *ep_ring;
1478 struct xhci_virt_ep *ep; 1478 struct xhci_virt_ep *ep;
1479 struct xhci_command *command; 1479 struct xhci_command *command;
1480 struct xhci_virt_device *vdev;
1480 1481
1481 xhci = hcd_to_xhci(hcd); 1482 xhci = hcd_to_xhci(hcd);
1482 spin_lock_irqsave(&xhci->lock, flags); 1483 spin_lock_irqsave(&xhci->lock, flags);
@@ -1485,15 +1486,27 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
1485 1486
1486 /* Make sure the URB hasn't completed or been unlinked already */ 1487 /* Make sure the URB hasn't completed or been unlinked already */
1487 ret = usb_hcd_check_unlink_urb(hcd, urb, status); 1488 ret = usb_hcd_check_unlink_urb(hcd, urb, status);
1488 if (ret || !urb->hcpriv) 1489 if (ret)
1489 goto done; 1490 goto done;
1491
1492 /* give back URB now if we can't queue it for cancel */
1493 vdev = xhci->devs[urb->dev->slot_id];
1494 urb_priv = urb->hcpriv;
1495 if (!vdev || !urb_priv)
1496 goto err_giveback;
1497
1498 ep_index = xhci_get_endpoint_index(&urb->ep->desc);
1499 ep = &vdev->eps[ep_index];
1500 ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
1501 if (!ep || !ep_ring)
1502 goto err_giveback;
1503
1490 temp = readl(&xhci->op_regs->status); 1504 temp = readl(&xhci->op_regs->status);
1491 if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_HALTED)) { 1505 if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_HALTED)) {
1492 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, 1506 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
1493 "HW died, freeing TD."); 1507 "HW died, freeing TD.");
1494 urb_priv = urb->hcpriv;
1495 for (i = urb_priv->num_tds_done; 1508 for (i = urb_priv->num_tds_done;
1496 i < urb_priv->num_tds && xhci->devs[urb->dev->slot_id]; 1509 i < urb_priv->num_tds;
1497 i++) { 1510 i++) {
1498 td = &urb_priv->td[i]; 1511 td = &urb_priv->td[i];
1499 if (!list_empty(&td->td_list)) 1512 if (!list_empty(&td->td_list))
@@ -1501,23 +1514,9 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
1501 if (!list_empty(&td->cancelled_td_list)) 1514 if (!list_empty(&td->cancelled_td_list))
1502 list_del_init(&td->cancelled_td_list); 1515 list_del_init(&td->cancelled_td_list);
1503 } 1516 }
1504 1517 goto err_giveback;
1505 usb_hcd_unlink_urb_from_ep(hcd, urb);
1506 spin_unlock_irqrestore(&xhci->lock, flags);
1507 usb_hcd_giveback_urb(hcd, urb, -ESHUTDOWN);
1508 xhci_urb_free_priv(urb_priv);
1509 return ret;
1510 } 1518 }
1511 1519
1512 ep_index = xhci_get_endpoint_index(&urb->ep->desc);
1513 ep = &xhci->devs[urb->dev->slot_id]->eps[ep_index];
1514 ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
1515 if (!ep_ring) {
1516 ret = -EINVAL;
1517 goto done;
1518 }
1519
1520 urb_priv = urb->hcpriv;
1521 i = urb_priv->num_tds_done; 1520 i = urb_priv->num_tds_done;
1522 if (i < urb_priv->num_tds) 1521 if (i < urb_priv->num_tds)
1523 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, 1522 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
@@ -1554,6 +1553,14 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
1554done: 1553done:
1555 spin_unlock_irqrestore(&xhci->lock, flags); 1554 spin_unlock_irqrestore(&xhci->lock, flags);
1556 return ret; 1555 return ret;
1556
1557err_giveback:
1558 if (urb_priv)
1559 xhci_urb_free_priv(urb_priv);
1560 usb_hcd_unlink_urb_from_ep(hcd, urb);
1561 spin_unlock_irqrestore(&xhci->lock, flags);
1562 usb_hcd_giveback_urb(hcd, urb, -ESHUTDOWN);
1563 return ret;
1557} 1564}
1558 1565
1559/* Drop an endpoint from a new bandwidth configuration for this device. 1566/* Drop an endpoint from a new bandwidth configuration for this device.
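
The xhci rework above reduces to one shape: if a cancel request cannot be queued (device gone, ring gone, or controller dead), the URB is handed back to the core immediately, and only after the host lock is dropped, since giveback may re-enter the driver. With placeholder types and helpers:

    static int dequeue(struct host *h, struct request *rq)
    {
            unsigned long flags;

            spin_lock_irqsave(&h->lock, flags);

            if (!rq->priv || !h->hw_alive)
                    goto err_giveback;      /* nothing left to cancel against */

            queue_cancel(h, rq);            /* normal path: cancel completes later */
            spin_unlock_irqrestore(&h->lock, flags);
            return 0;

    err_giveback:
            unlink(h, rq);
            spin_unlock_irqrestore(&h->lock, flags);
            give_back(h, rq, -ESHUTDOWN);   /* may re-enter: no lock held here */
            return 0;
    }
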
diff --git a/drivers/usb/phy/phy-isp1301.c b/drivers/usb/phy/phy-isp1301.c
index b3b33cf7ddf6..f333024660b4 100644
--- a/drivers/usb/phy/phy-isp1301.c
+++ b/drivers/usb/phy/phy-isp1301.c
@@ -136,7 +136,7 @@ static int isp1301_remove(struct i2c_client *client)
136static struct i2c_driver isp1301_driver = { 136static struct i2c_driver isp1301_driver = {
137 .driver = { 137 .driver = {
138 .name = DRV_NAME, 138 .name = DRV_NAME,
139 .of_match_table = of_match_ptr(isp1301_of_match), 139 .of_match_table = isp1301_of_match,
140 }, 140 },
141 .probe = isp1301_probe, 141 .probe = isp1301_probe,
142 .remove = isp1301_remove, 142 .remove = isp1301_remove,
diff --git a/drivers/video/fbdev/efifb.c b/drivers/video/fbdev/efifb.c
index 8c4dc1e1f94f..b827a8113e26 100644
--- a/drivers/video/fbdev/efifb.c
+++ b/drivers/video/fbdev/efifb.c
@@ -10,6 +10,7 @@
10#include <linux/efi.h> 10#include <linux/efi.h>
11#include <linux/errno.h> 11#include <linux/errno.h>
12#include <linux/fb.h> 12#include <linux/fb.h>
13#include <linux/pci.h>
13#include <linux/platform_device.h> 14#include <linux/platform_device.h>
14#include <linux/screen_info.h> 15#include <linux/screen_info.h>
15#include <video/vga.h> 16#include <video/vga.h>
@@ -143,6 +144,8 @@ static struct attribute *efifb_attrs[] = {
143}; 144};
144ATTRIBUTE_GROUPS(efifb); 145ATTRIBUTE_GROUPS(efifb);
145 146
147static bool pci_dev_disabled; /* FB base matches BAR of a disabled device */
148
146static int efifb_probe(struct platform_device *dev) 149static int efifb_probe(struct platform_device *dev)
147{ 150{
148 struct fb_info *info; 151 struct fb_info *info;
@@ -152,7 +155,7 @@ static int efifb_probe(struct platform_device *dev)
152 unsigned int size_total; 155 unsigned int size_total;
153 char *option = NULL; 156 char *option = NULL;
154 157
155 if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI) 158 if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI || pci_dev_disabled)
156 return -ENODEV; 159 return -ENODEV;
157 160
158 if (fb_get_options("efifb", &option)) 161 if (fb_get_options("efifb", &option))
@@ -360,3 +363,64 @@ static struct platform_driver efifb_driver = {
360}; 363};
361 364
362builtin_platform_driver(efifb_driver); 365builtin_platform_driver(efifb_driver);
366
367#if defined(CONFIG_PCI) && !defined(CONFIG_X86)
368
369static bool pci_bar_found; /* did we find a BAR matching the efifb base? */
370
371static void claim_efifb_bar(struct pci_dev *dev, int idx)
372{
373 u16 word;
374
375 pci_bar_found = true;
376
377 pci_read_config_word(dev, PCI_COMMAND, &word);
378 if (!(word & PCI_COMMAND_MEMORY)) {
379 pci_dev_disabled = true;
380 dev_err(&dev->dev,
381 "BAR %d: assigned to efifb but device is disabled!\n",
382 idx);
383 return;
384 }
385
386 if (pci_claim_resource(dev, idx)) {
387 pci_dev_disabled = true;
388 dev_err(&dev->dev,
389 "BAR %d: failed to claim resource for efifb!\n", idx);
390 return;
391 }
392
393 dev_info(&dev->dev, "BAR %d: assigned to efifb\n", idx);
394}
395
396static void efifb_fixup_resources(struct pci_dev *dev)
397{
398 u64 base = screen_info.lfb_base;
399 u64 size = screen_info.lfb_size;
400 int i;
401
402 if (pci_bar_found || screen_info.orig_video_isVGA != VIDEO_TYPE_EFI)
403 return;
404
405 if (screen_info.capabilities & VIDEO_CAPABILITY_64BIT_BASE)
406 base |= (u64)screen_info.ext_lfb_base << 32;
407
408 if (!base)
409 return;
410
 411 for (i = 0; i <= PCI_STD_RESOURCE_END; i++) {
412 struct resource *res = &dev->resource[i];
413
414 if (!(res->flags & IORESOURCE_MEM))
415 continue;
416
417 if (res->start <= base && res->end >= base + size - 1) {
418 claim_efifb_bar(dev, i);
419 break;
420 }
421 }
422}
423DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_ANY_ID, PCI_ANY_ID, PCI_BASE_CLASS_DISPLAY,
424 16, efifb_fixup_resources);
425
426#endif
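
The containment test that decides whether the firmware framebuffer lives inside a given BAR, isolated as plain C (screen_info splits a 64-bit base across two fields, so it is reassembled first; the function name is invented):

    #include <stdbool.h>
    #include <stdint.h>

    static bool fb_in_bar(uint64_t lfb_base, uint32_t ext_lfb_base,
                          uint64_t size, uint64_t start, uint64_t end)
    {
            /* The high half lives in a separate field on 64-bit capable firmware. */
            uint64_t base = lfb_base | ((uint64_t)ext_lfb_base << 32);

            /* BAR [start, end] must cover every framebuffer byte. */
            return base && start <= base && end >= base + size - 1;
    }
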
diff --git a/drivers/video/fbdev/omap/omapfb_main.c b/drivers/video/fbdev/omap/omapfb_main.c
index 1abba07b84b3..f4cbfb3b8a09 100644
--- a/drivers/video/fbdev/omap/omapfb_main.c
+++ b/drivers/video/fbdev/omap/omapfb_main.c
@@ -1608,19 +1608,6 @@ static int omapfb_find_ctrl(struct omapfb_device *fbdev)
1608 return 0; 1608 return 0;
1609} 1609}
1610 1610
1611static void check_required_callbacks(struct omapfb_device *fbdev)
1612{
1613#define _C(x) (fbdev->ctrl->x != NULL)
1614#define _P(x) (fbdev->panel->x != NULL)
1615 BUG_ON(fbdev->ctrl == NULL || fbdev->panel == NULL);
1616 BUG_ON(!(_C(init) && _C(cleanup) && _C(get_caps) &&
1617 _C(set_update_mode) && _C(setup_plane) && _C(enable_plane) &&
1618 _P(init) && _P(cleanup) && _P(enable) && _P(disable) &&
1619 _P(get_caps)));
1620#undef _P
1621#undef _C
1622}
1623
1624/* 1611/*
1625 * Called by LDM binding to probe and attach a new device. 1612 * Called by LDM binding to probe and attach a new device.
1626 * Initialization sequence: 1613 * Initialization sequence:
@@ -1705,8 +1692,6 @@ static int omapfb_do_probe(struct platform_device *pdev,
1705 omapfb_ops.fb_mmap = omapfb_mmap; 1692 omapfb_ops.fb_mmap = omapfb_mmap;
1706 init_state++; 1693 init_state++;
1707 1694
1708 check_required_callbacks(fbdev);
1709
1710 r = planes_init(fbdev); 1695 r = planes_init(fbdev);
1711 if (r) 1696 if (r)
1712 goto cleanup; 1697 goto cleanup;
diff --git a/drivers/video/fbdev/ssd1307fb.c b/drivers/video/fbdev/ssd1307fb.c
index bd017b57c47f..f599520374dd 100644
--- a/drivers/video/fbdev/ssd1307fb.c
+++ b/drivers/video/fbdev/ssd1307fb.c
@@ -578,10 +578,14 @@ static int ssd1307fb_probe(struct i2c_client *client,
578 578
579 par->vbat_reg = devm_regulator_get_optional(&client->dev, "vbat"); 579 par->vbat_reg = devm_regulator_get_optional(&client->dev, "vbat");
580 if (IS_ERR(par->vbat_reg)) { 580 if (IS_ERR(par->vbat_reg)) {
581 dev_err(&client->dev, "failed to get VBAT regulator: %ld\n",
582 PTR_ERR(par->vbat_reg));
583 ret = PTR_ERR(par->vbat_reg); 581 ret = PTR_ERR(par->vbat_reg);
584 goto fb_alloc_error; 582 if (ret == -ENODEV) {
583 par->vbat_reg = NULL;
584 } else {
585 dev_err(&client->dev, "failed to get VBAT regulator: %d\n",
586 ret);
587 goto fb_alloc_error;
588 }
585 } 589 }
586 590
587 if (of_property_read_u32(node, "solomon,width", &par->width)) 591 if (of_property_read_u32(node, "solomon,width", &par->width))
@@ -668,10 +672,13 @@ static int ssd1307fb_probe(struct i2c_client *client,
668 udelay(4); 672 udelay(4);
669 } 673 }
670 674
671 ret = regulator_enable(par->vbat_reg); 675 if (par->vbat_reg) {
672 if (ret) { 676 ret = regulator_enable(par->vbat_reg);
673 dev_err(&client->dev, "failed to enable VBAT: %d\n", ret); 677 if (ret) {
674 goto reset_oled_error; 678 dev_err(&client->dev, "failed to enable VBAT: %d\n",
679 ret);
680 goto reset_oled_error;
681 }
675 } 682 }
676 683
677 ret = ssd1307fb_init(par); 684 ret = ssd1307fb_init(par);
@@ -710,7 +717,8 @@ panel_init_error:
710 pwm_put(par->pwm); 717 pwm_put(par->pwm);
711 }; 718 };
712regulator_enable_error: 719regulator_enable_error:
713 regulator_disable(par->vbat_reg); 720 if (par->vbat_reg)
721 regulator_disable(par->vbat_reg);
714reset_oled_error: 722reset_oled_error:
715 fb_deferred_io_cleanup(info); 723 fb_deferred_io_cleanup(info);
716fb_alloc_error: 724fb_alloc_error:
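
The ssd1307fb change is the standard optional-supply idiom: -ENODEV from devm_regulator_get_optional() means the board simply has no such regulator, so it maps to a NULL handle that every later enable/disable must tolerate. Sketched as a helper (names invented):

    static int get_optional_vbat(struct device *dev, struct regulator **out)
    {
            struct regulator *reg;

            reg = devm_regulator_get_optional(dev, "vbat");
            if (IS_ERR(reg)) {
                    if (PTR_ERR(reg) != -ENODEV)
                            return PTR_ERR(reg);    /* real error, e.g. -EPROBE_DEFER */
                    reg = NULL;                     /* supply not wired up: fine */
            }

            *out = reg;
            return reg ? regulator_enable(reg) : 0;
    }
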
diff --git a/drivers/video/fbdev/xen-fbfront.c b/drivers/video/fbdev/xen-fbfront.c
index d0115a7af0a9..3ee309c50b2d 100644
--- a/drivers/video/fbdev/xen-fbfront.c
+++ b/drivers/video/fbdev/xen-fbfront.c
@@ -643,7 +643,6 @@ static void xenfb_backend_changed(struct xenbus_device *dev,
643 break; 643 break;
644 644
645 case XenbusStateInitWait: 645 case XenbusStateInitWait:
646InitWait:
647 xenbus_switch_state(dev, XenbusStateConnected); 646 xenbus_switch_state(dev, XenbusStateConnected);
648 break; 647 break;
649 648
@@ -654,7 +653,8 @@ InitWait:
654 * get Connected twice here. 653 * get Connected twice here.
655 */ 654 */
656 if (dev->state != XenbusStateConnected) 655 if (dev->state != XenbusStateConnected)
657 goto InitWait; /* no InitWait seen yet, fudge it */ 656 /* no InitWait seen yet, fudge it */
657 xenbus_switch_state(dev, XenbusStateConnected);
658 658
659 if (xenbus_read_unsigned(info->xbdev->otherend, 659 if (xenbus_read_unsigned(info->xbdev->otherend,
660 "request-update", 0)) 660 "request-update", 0))
diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
index 400d70b69379..48230a5e12f2 100644
--- a/drivers/virtio/virtio.c
+++ b/drivers/virtio/virtio.c
@@ -232,6 +232,12 @@ static int virtio_dev_probe(struct device *_d)
232 if (device_features & (1ULL << i)) 232 if (device_features & (1ULL << i))
233 __virtio_set_bit(dev, i); 233 __virtio_set_bit(dev, i);
234 234
235 if (drv->validate) {
236 err = drv->validate(dev);
237 if (err)
238 goto err;
239 }
240
235 err = virtio_finalize_features(dev); 241 err = virtio_finalize_features(dev);
236 if (err) 242 if (err)
237 goto err; 243 goto err;
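
The new validate() hook runs after the device's feature bits have been read into the vdev but before they are finalized, so a driver can veto the device or trim features it cannot honour. A hypothetical callback (the policy shown is invented, not from any in-tree driver):

    static int my_validate(struct virtio_device *vdev)
    {
            /* Example policy: refuse devices that do not offer VERSION_1. */
            if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
                    return -ENODEV;

            /* Example policy: withdraw an offered feature we cannot use. */
            __virtio_clear_bit(vdev, VIRTIO_F_ANY_LAYOUT);

            return 0;
    }
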
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 4e1191508228..34adf9b9c053 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -242,11 +242,11 @@ static inline void update_stat(struct virtio_balloon *vb, int idx,
242 242
243#define pages_to_bytes(x) ((u64)(x) << PAGE_SHIFT) 243#define pages_to_bytes(x) ((u64)(x) << PAGE_SHIFT)
244 244
245static void update_balloon_stats(struct virtio_balloon *vb) 245static unsigned int update_balloon_stats(struct virtio_balloon *vb)
246{ 246{
247 unsigned long events[NR_VM_EVENT_ITEMS]; 247 unsigned long events[NR_VM_EVENT_ITEMS];
248 struct sysinfo i; 248 struct sysinfo i;
249 int idx = 0; 249 unsigned int idx = 0;
250 long available; 250 long available;
251 251
252 all_vm_events(events); 252 all_vm_events(events);
@@ -254,18 +254,22 @@ static void update_balloon_stats(struct virtio_balloon *vb)
254 254
255 available = si_mem_available(); 255 available = si_mem_available();
256 256
257#ifdef CONFIG_VM_EVENT_COUNTERS
257 update_stat(vb, idx++, VIRTIO_BALLOON_S_SWAP_IN, 258 update_stat(vb, idx++, VIRTIO_BALLOON_S_SWAP_IN,
258 pages_to_bytes(events[PSWPIN])); 259 pages_to_bytes(events[PSWPIN]));
259 update_stat(vb, idx++, VIRTIO_BALLOON_S_SWAP_OUT, 260 update_stat(vb, idx++, VIRTIO_BALLOON_S_SWAP_OUT,
260 pages_to_bytes(events[PSWPOUT])); 261 pages_to_bytes(events[PSWPOUT]));
261 update_stat(vb, idx++, VIRTIO_BALLOON_S_MAJFLT, events[PGMAJFAULT]); 262 update_stat(vb, idx++, VIRTIO_BALLOON_S_MAJFLT, events[PGMAJFAULT]);
262 update_stat(vb, idx++, VIRTIO_BALLOON_S_MINFLT, events[PGFAULT]); 263 update_stat(vb, idx++, VIRTIO_BALLOON_S_MINFLT, events[PGFAULT]);
264#endif
263 update_stat(vb, idx++, VIRTIO_BALLOON_S_MEMFREE, 265 update_stat(vb, idx++, VIRTIO_BALLOON_S_MEMFREE,
264 pages_to_bytes(i.freeram)); 266 pages_to_bytes(i.freeram));
265 update_stat(vb, idx++, VIRTIO_BALLOON_S_MEMTOT, 267 update_stat(vb, idx++, VIRTIO_BALLOON_S_MEMTOT,
266 pages_to_bytes(i.totalram)); 268 pages_to_bytes(i.totalram));
267 update_stat(vb, idx++, VIRTIO_BALLOON_S_AVAIL, 269 update_stat(vb, idx++, VIRTIO_BALLOON_S_AVAIL,
268 pages_to_bytes(available)); 270 pages_to_bytes(available));
271
272 return idx;
269} 273}
270 274
271/* 275/*
@@ -291,14 +295,14 @@ static void stats_handle_request(struct virtio_balloon *vb)
291{ 295{
292 struct virtqueue *vq; 296 struct virtqueue *vq;
293 struct scatterlist sg; 297 struct scatterlist sg;
294 unsigned int len; 298 unsigned int len, num_stats;
295 299
296 update_balloon_stats(vb); 300 num_stats = update_balloon_stats(vb);
297 301
298 vq = vb->stats_vq; 302 vq = vb->stats_vq;
299 if (!virtqueue_get_buf(vq, &len)) 303 if (!virtqueue_get_buf(vq, &len))
300 return; 304 return;
301 sg_init_one(&sg, vb->stats, sizeof(vb->stats)); 305 sg_init_one(&sg, vb->stats, sizeof(vb->stats[0]) * num_stats);
302 virtqueue_add_outbuf(vq, &sg, 1, vb, GFP_KERNEL); 306 virtqueue_add_outbuf(vq, &sg, 1, vb, GFP_KERNEL);
303 virtqueue_kick(vq); 307 virtqueue_kick(vq);
304} 308}
@@ -423,13 +427,16 @@ static int init_vqs(struct virtio_balloon *vb)
423 vb->deflate_vq = vqs[1]; 427 vb->deflate_vq = vqs[1];
424 if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ)) { 428 if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ)) {
425 struct scatterlist sg; 429 struct scatterlist sg;
430 unsigned int num_stats;
426 vb->stats_vq = vqs[2]; 431 vb->stats_vq = vqs[2];
427 432
428 /* 433 /*
429 * Prime this virtqueue with one buffer so the hypervisor can 434 * Prime this virtqueue with one buffer so the hypervisor can
430 * use it to signal us later (it can't be broken yet!). 435 * use it to signal us later (it can't be broken yet!).
431 */ 436 */
432 sg_init_one(&sg, vb->stats, sizeof vb->stats); 437 num_stats = update_balloon_stats(vb);
438
439 sg_init_one(&sg, vb->stats, sizeof(vb->stats[0]) * num_stats);
433 if (virtqueue_add_outbuf(vb->stats_vq, &sg, 1, vb, GFP_KERNEL) 440 if (virtqueue_add_outbuf(vb->stats_vq, &sg, 1, vb, GFP_KERNEL)
434 < 0) 441 < 0)
435 BUG(); 442 BUG();
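
The balloon change in one line: publish only the stats entries actually written, not the whole array, so counters that are compiled out (here the CONFIG_VM_EVENT_COUNTERS group) never reach the host as stale slots. The idiom, with a hypothetical fill helper:

    unsigned int num_stats = fill_stats(vb->stats); /* entries actually written */
    struct scatterlist sg;

    sg_init_one(&sg, vb->stats, sizeof(vb->stats[0]) * num_stats);
    /* not sizeof(vb->stats): trailing, never-filled slots stay private */
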
diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
index df548a6fb844..698d5d06fa03 100644
--- a/drivers/virtio/virtio_pci_common.c
+++ b/drivers/virtio/virtio_pci_common.c
@@ -33,8 +33,10 @@ void vp_synchronize_vectors(struct virtio_device *vdev)
33 struct virtio_pci_device *vp_dev = to_vp_device(vdev); 33 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
34 int i; 34 int i;
35 35
36 synchronize_irq(pci_irq_vector(vp_dev->pci_dev, 0)); 36 if (vp_dev->intx_enabled)
37 for (i = 1; i < vp_dev->msix_vectors; i++) 37 synchronize_irq(vp_dev->pci_dev->irq);
38
39 for (i = 0; i < vp_dev->msix_vectors; ++i)
38 synchronize_irq(pci_irq_vector(vp_dev->pci_dev, i)); 40 synchronize_irq(pci_irq_vector(vp_dev->pci_dev, i));
39} 41}
40 42
@@ -60,13 +62,16 @@ static irqreturn_t vp_config_changed(int irq, void *opaque)
60static irqreturn_t vp_vring_interrupt(int irq, void *opaque) 62static irqreturn_t vp_vring_interrupt(int irq, void *opaque)
61{ 63{
62 struct virtio_pci_device *vp_dev = opaque; 64 struct virtio_pci_device *vp_dev = opaque;
65 struct virtio_pci_vq_info *info;
63 irqreturn_t ret = IRQ_NONE; 66 irqreturn_t ret = IRQ_NONE;
64 struct virtqueue *vq; 67 unsigned long flags;
65 68
66 list_for_each_entry(vq, &vp_dev->vdev.vqs, list) { 69 spin_lock_irqsave(&vp_dev->lock, flags);
67 if (vq->callback && vring_interrupt(irq, vq) == IRQ_HANDLED) 70 list_for_each_entry(info, &vp_dev->virtqueues, node) {
71 if (vring_interrupt(irq, info->vq) == IRQ_HANDLED)
68 ret = IRQ_HANDLED; 72 ret = IRQ_HANDLED;
69 } 73 }
74 spin_unlock_irqrestore(&vp_dev->lock, flags);
70 75
71 return ret; 76 return ret;
72} 77}
@@ -97,185 +102,244 @@ static irqreturn_t vp_interrupt(int irq, void *opaque)
97 return vp_vring_interrupt(irq, opaque); 102 return vp_vring_interrupt(irq, opaque);
98} 103}
99 104
100static void vp_remove_vqs(struct virtio_device *vdev) 105static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
106 bool per_vq_vectors, struct irq_affinity *desc)
101{ 107{
102 struct virtio_pci_device *vp_dev = to_vp_device(vdev); 108 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
103 struct virtqueue *vq, *n; 109 const char *name = dev_name(&vp_dev->vdev.dev);
110 unsigned i, v;
111 int err = -ENOMEM;
104 112
105 list_for_each_entry_safe(vq, n, &vdev->vqs, list) { 113 vp_dev->msix_vectors = nvectors;
106 if (vp_dev->msix_vector_map) {
107 int v = vp_dev->msix_vector_map[vq->index];
108 114
109 if (v != VIRTIO_MSI_NO_VECTOR) 115 vp_dev->msix_names = kmalloc(nvectors * sizeof *vp_dev->msix_names,
110 free_irq(pci_irq_vector(vp_dev->pci_dev, v), 116 GFP_KERNEL);
111 vq); 117 if (!vp_dev->msix_names)
112 } 118 goto error;
113 vp_dev->del_vq(vq); 119 vp_dev->msix_affinity_masks
120 = kzalloc(nvectors * sizeof *vp_dev->msix_affinity_masks,
121 GFP_KERNEL);
122 if (!vp_dev->msix_affinity_masks)
123 goto error;
124 for (i = 0; i < nvectors; ++i)
125 if (!alloc_cpumask_var(&vp_dev->msix_affinity_masks[i],
126 GFP_KERNEL))
127 goto error;
128
129 err = pci_alloc_irq_vectors_affinity(vp_dev->pci_dev, nvectors,
130 nvectors, PCI_IRQ_MSIX |
131 (desc ? PCI_IRQ_AFFINITY : 0),
132 desc);
133 if (err < 0)
134 goto error;
135 vp_dev->msix_enabled = 1;
136
137 /* Set the vector used for configuration */
138 v = vp_dev->msix_used_vectors;
139 snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
140 "%s-config", name);
141 err = request_irq(pci_irq_vector(vp_dev->pci_dev, v),
142 vp_config_changed, 0, vp_dev->msix_names[v],
143 vp_dev);
144 if (err)
145 goto error;
146 ++vp_dev->msix_used_vectors;
147
148 v = vp_dev->config_vector(vp_dev, v);
149 /* Verify we had enough resources to assign the vector */
150 if (v == VIRTIO_MSI_NO_VECTOR) {
151 err = -EBUSY;
152 goto error;
114 } 153 }
154
155 if (!per_vq_vectors) {
156 /* Shared vector for all VQs */
157 v = vp_dev->msix_used_vectors;
158 snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
159 "%s-virtqueues", name);
160 err = request_irq(pci_irq_vector(vp_dev->pci_dev, v),
161 vp_vring_interrupt, 0, vp_dev->msix_names[v],
162 vp_dev);
163 if (err)
164 goto error;
165 ++vp_dev->msix_used_vectors;
166 }
167 return 0;
168error:
169 return err;
170}
171
172static struct virtqueue *vp_setup_vq(struct virtio_device *vdev, unsigned index,
173 void (*callback)(struct virtqueue *vq),
174 const char *name,
175 u16 msix_vec)
176{
177 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
178 struct virtio_pci_vq_info *info = kmalloc(sizeof *info, GFP_KERNEL);
179 struct virtqueue *vq;
180 unsigned long flags;
181
182 /* fill out our structure that represents an active queue */
183 if (!info)
184 return ERR_PTR(-ENOMEM);
185
186 vq = vp_dev->setup_vq(vp_dev, info, index, callback, name,
187 msix_vec);
188 if (IS_ERR(vq))
189 goto out_info;
190
191 info->vq = vq;
192 if (callback) {
193 spin_lock_irqsave(&vp_dev->lock, flags);
194 list_add(&info->node, &vp_dev->virtqueues);
195 spin_unlock_irqrestore(&vp_dev->lock, flags);
196 } else {
197 INIT_LIST_HEAD(&info->node);
198 }
199
200 vp_dev->vqs[index] = info;
201 return vq;
202
203out_info:
204 kfree(info);
205 return vq;
206}
207
208static void vp_del_vq(struct virtqueue *vq)
209{
210 struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
211 struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index];
212 unsigned long flags;
213
214 spin_lock_irqsave(&vp_dev->lock, flags);
215 list_del(&info->node);
216 spin_unlock_irqrestore(&vp_dev->lock, flags);
217
218 vp_dev->del_vq(info);
219 kfree(info);
115} 220}
116 221
117/* the config->del_vqs() implementation */ 222/* the config->del_vqs() implementation */
118void vp_del_vqs(struct virtio_device *vdev) 223void vp_del_vqs(struct virtio_device *vdev)
119{ 224{
120 struct virtio_pci_device *vp_dev = to_vp_device(vdev); 225 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
226 struct virtqueue *vq, *n;
121 int i; 227 int i;
122 228
123 if (WARN_ON_ONCE(list_empty_careful(&vdev->vqs))) 229 list_for_each_entry_safe(vq, n, &vdev->vqs, list) {
124 return; 230 if (vp_dev->per_vq_vectors) {
231 int v = vp_dev->vqs[vq->index]->msix_vector;
125 232
126 vp_remove_vqs(vdev); 233 if (v != VIRTIO_MSI_NO_VECTOR) {
234 int irq = pci_irq_vector(vp_dev->pci_dev, v);
235
236 irq_set_affinity_hint(irq, NULL);
237 free_irq(irq, vq);
238 }
239 }
240 vp_del_vq(vq);
241 }
242 vp_dev->per_vq_vectors = false;
243
244 if (vp_dev->intx_enabled) {
245 free_irq(vp_dev->pci_dev->irq, vp_dev);
246 vp_dev->intx_enabled = 0;
247 }
127 248
128 if (vp_dev->pci_dev->msix_enabled) { 249 for (i = 0; i < vp_dev->msix_used_vectors; ++i)
129 for (i = 0; i < vp_dev->msix_vectors; i++) 250 free_irq(pci_irq_vector(vp_dev->pci_dev, i), vp_dev);
251
252 for (i = 0; i < vp_dev->msix_vectors; i++)
253 if (vp_dev->msix_affinity_masks[i])
130 free_cpumask_var(vp_dev->msix_affinity_masks[i]); 254 free_cpumask_var(vp_dev->msix_affinity_masks[i]);
131 255
256 if (vp_dev->msix_enabled) {
132 /* Disable the vector used for configuration */ 257 /* Disable the vector used for configuration */
133 vp_dev->config_vector(vp_dev, VIRTIO_MSI_NO_VECTOR); 258 vp_dev->config_vector(vp_dev, VIRTIO_MSI_NO_VECTOR);
134 259
135 kfree(vp_dev->msix_affinity_masks); 260 pci_free_irq_vectors(vp_dev->pci_dev);
136 kfree(vp_dev->msix_names); 261 vp_dev->msix_enabled = 0;
137 kfree(vp_dev->msix_vector_map);
138 } 262 }
139 263
140 free_irq(pci_irq_vector(vp_dev->pci_dev, 0), vp_dev); 264 vp_dev->msix_vectors = 0;
141 pci_free_irq_vectors(vp_dev->pci_dev); 265 vp_dev->msix_used_vectors = 0;
266 kfree(vp_dev->msix_names);
267 vp_dev->msix_names = NULL;
268 kfree(vp_dev->msix_affinity_masks);
269 vp_dev->msix_affinity_masks = NULL;
270 kfree(vp_dev->vqs);
271 vp_dev->vqs = NULL;
142} 272}
143 273
144static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs, 274static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs,
145 struct virtqueue *vqs[], vq_callback_t *callbacks[], 275 struct virtqueue *vqs[], vq_callback_t *callbacks[],
146 const char * const names[], struct irq_affinity *desc) 276 const char * const names[], bool per_vq_vectors,
277 struct irq_affinity *desc)
147{ 278{
148 struct virtio_pci_device *vp_dev = to_vp_device(vdev); 279 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
149 const char *name = dev_name(&vp_dev->vdev.dev);
150 int i, err = -ENOMEM, allocated_vectors, nvectors;
151 unsigned flags = PCI_IRQ_MSIX;
152 bool shared = false;
153 u16 msix_vec; 280 u16 msix_vec;
281 int i, err, nvectors, allocated_vectors;
154 282
155 if (desc) { 283 vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL);
156 flags |= PCI_IRQ_AFFINITY; 284 if (!vp_dev->vqs)
157 desc->pre_vectors++; /* virtio config vector */ 285 return -ENOMEM;
158 }
159
160 nvectors = 1;
161 for (i = 0; i < nvqs; i++)
162 if (callbacks[i])
163 nvectors++;
164
165 /* Try one vector per queue first. */
166 err = pci_alloc_irq_vectors_affinity(vp_dev->pci_dev, nvectors,
167 nvectors, flags, desc);
168 if (err < 0) {
169 /* Fallback to one vector for config, one shared for queues. */
170 shared = true;
171 err = pci_alloc_irq_vectors(vp_dev->pci_dev, 2, 2,
172 PCI_IRQ_MSIX);
173 if (err < 0)
174 return err;
175 }
176 if (err < 0)
177 return err;
178
179 vp_dev->msix_vectors = nvectors;
180 vp_dev->msix_names = kmalloc_array(nvectors,
181 sizeof(*vp_dev->msix_names), GFP_KERNEL);
182 if (!vp_dev->msix_names)
183 goto out_free_irq_vectors;
184
185 vp_dev->msix_affinity_masks = kcalloc(nvectors,
186 sizeof(*vp_dev->msix_affinity_masks), GFP_KERNEL);
187 if (!vp_dev->msix_affinity_masks)
188 goto out_free_msix_names;
189 286
190 for (i = 0; i < nvectors; ++i) { 287 if (per_vq_vectors) {
191 if (!alloc_cpumask_var(&vp_dev->msix_affinity_masks[i], 288 /* Best option: one for change interrupt, one per vq. */
192 GFP_KERNEL)) 289 nvectors = 1;
193 goto out_free_msix_affinity_masks; 290 for (i = 0; i < nvqs; ++i)
291 if (callbacks[i])
292 ++nvectors;
293 } else {
294 /* Second best: one for change, shared for all vqs. */
295 nvectors = 2;
194 } 296 }
195 297
196 /* Set the vector used for configuration */ 298 err = vp_request_msix_vectors(vdev, nvectors, per_vq_vectors,
197 snprintf(vp_dev->msix_names[0], sizeof(*vp_dev->msix_names), 299 per_vq_vectors ? desc : NULL);
198 "%s-config", name);
199 err = request_irq(pci_irq_vector(vp_dev->pci_dev, 0), vp_config_changed,
200 0, vp_dev->msix_names[0], vp_dev);
201 if (err) 300 if (err)
202 goto out_free_msix_affinity_masks; 301 goto error_find;
203 302
204 /* Verify we had enough resources to assign the vector */ 303 vp_dev->per_vq_vectors = per_vq_vectors;
205 if (vp_dev->config_vector(vp_dev, 0) == VIRTIO_MSI_NO_VECTOR) { 304 allocated_vectors = vp_dev->msix_used_vectors;
206 err = -EBUSY;
207 goto out_free_config_irq;
208 }
209
210 vp_dev->msix_vector_map = kmalloc_array(nvqs,
211 sizeof(*vp_dev->msix_vector_map), GFP_KERNEL);
212 if (!vp_dev->msix_vector_map)
213 goto out_disable_config_irq;
214
215 allocated_vectors = 1; /* vector 0 is the config interrupt */
216 for (i = 0; i < nvqs; ++i) { 305 for (i = 0; i < nvqs; ++i) {
217 if (!names[i]) { 306 if (!names[i]) {
218 vqs[i] = NULL; 307 vqs[i] = NULL;
219 continue; 308 continue;
220 } 309 }
221 310
222 if (callbacks[i]) 311 if (!callbacks[i])
223 msix_vec = allocated_vectors;
224 else
225 msix_vec = VIRTIO_MSI_NO_VECTOR; 312 msix_vec = VIRTIO_MSI_NO_VECTOR;
226 313 else if (vp_dev->per_vq_vectors)
227 vqs[i] = vp_dev->setup_vq(vp_dev, i, callbacks[i], names[i], 314 msix_vec = allocated_vectors++;
228 msix_vec); 315 else
316 msix_vec = VP_MSIX_VQ_VECTOR;
317 vqs[i] = vp_setup_vq(vdev, i, callbacks[i], names[i],
318 msix_vec);
229 if (IS_ERR(vqs[i])) { 319 if (IS_ERR(vqs[i])) {
230 err = PTR_ERR(vqs[i]); 320 err = PTR_ERR(vqs[i]);
231 goto out_remove_vqs; 321 goto error_find;
232 } 322 }
233 323
234 if (msix_vec == VIRTIO_MSI_NO_VECTOR) { 324 if (!vp_dev->per_vq_vectors || msix_vec == VIRTIO_MSI_NO_VECTOR)
235 vp_dev->msix_vector_map[i] = VIRTIO_MSI_NO_VECTOR;
236 continue; 325 continue;
237 }
238 326
239 snprintf(vp_dev->msix_names[i + 1], 327 /* allocate per-vq irq if available and necessary */
240 sizeof(*vp_dev->msix_names), "%s-%s", 328 snprintf(vp_dev->msix_names[msix_vec],
329 sizeof *vp_dev->msix_names,
330 "%s-%s",
241 dev_name(&vp_dev->vdev.dev), names[i]); 331 dev_name(&vp_dev->vdev.dev), names[i]);
242 err = request_irq(pci_irq_vector(vp_dev->pci_dev, msix_vec), 332 err = request_irq(pci_irq_vector(vp_dev->pci_dev, msix_vec),
243 vring_interrupt, IRQF_SHARED, 333 vring_interrupt, 0,
244 vp_dev->msix_names[i + 1], vqs[i]); 334 vp_dev->msix_names[msix_vec],
245 if (err) { 335 vqs[i]);
246 /* don't free this irq on error */ 336 if (err)
247 vp_dev->msix_vector_map[i] = VIRTIO_MSI_NO_VECTOR; 337 goto error_find;
248 goto out_remove_vqs;
249 }
250 vp_dev->msix_vector_map[i] = msix_vec;
251
252 /*
253 * Use a different vector for each queue if they are available,
254 * else share the same vector for all VQs.
255 */
256 if (!shared)
257 allocated_vectors++;
258 } 338 }
259
260 return 0; 339 return 0;
261 340
262out_remove_vqs: 341error_find:
263 vp_remove_vqs(vdev); 342 vp_del_vqs(vdev);
264 kfree(vp_dev->msix_vector_map);
265out_disable_config_irq:
266 vp_dev->config_vector(vp_dev, VIRTIO_MSI_NO_VECTOR);
267out_free_config_irq:
268 free_irq(pci_irq_vector(vp_dev->pci_dev, 0), vp_dev);
269out_free_msix_affinity_masks:
270 for (i = 0; i < nvectors; i++) {
271 if (vp_dev->msix_affinity_masks[i])
272 free_cpumask_var(vp_dev->msix_affinity_masks[i]);
273 }
274 kfree(vp_dev->msix_affinity_masks);
275out_free_msix_names:
276 kfree(vp_dev->msix_names);
277out_free_irq_vectors:
278 pci_free_irq_vectors(vp_dev->pci_dev);
279 return err; 343 return err;
280} 344}
281 345
@@ -286,29 +350,33 @@ static int vp_find_vqs_intx(struct virtio_device *vdev, unsigned nvqs,
286 struct virtio_pci_device *vp_dev = to_vp_device(vdev); 350 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
287 int i, err; 351 int i, err;
288 352
353 vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL);
354 if (!vp_dev->vqs)
355 return -ENOMEM;
356
289 err = request_irq(vp_dev->pci_dev->irq, vp_interrupt, IRQF_SHARED, 357 err = request_irq(vp_dev->pci_dev->irq, vp_interrupt, IRQF_SHARED,
290 dev_name(&vdev->dev), vp_dev); 358 dev_name(&vdev->dev), vp_dev);
291 if (err) 359 if (err)
292 return err; 360 goto out_del_vqs;
293 361
362 vp_dev->intx_enabled = 1;
363 vp_dev->per_vq_vectors = false;
294 for (i = 0; i < nvqs; ++i) { 364 for (i = 0; i < nvqs; ++i) {
295 if (!names[i]) { 365 if (!names[i]) {
296 vqs[i] = NULL; 366 vqs[i] = NULL;
297 continue; 367 continue;
298 } 368 }
299 vqs[i] = vp_dev->setup_vq(vp_dev, i, callbacks[i], names[i], 369 vqs[i] = vp_setup_vq(vdev, i, callbacks[i], names[i],
300 VIRTIO_MSI_NO_VECTOR); 370 VIRTIO_MSI_NO_VECTOR);
301 if (IS_ERR(vqs[i])) { 371 if (IS_ERR(vqs[i])) {
302 err = PTR_ERR(vqs[i]); 372 err = PTR_ERR(vqs[i]);
303 goto out_remove_vqs; 373 goto out_del_vqs;
304 } 374 }
305 } 375 }
306 376
307 return 0; 377 return 0;
308 378out_del_vqs:
309out_remove_vqs: 379 vp_del_vqs(vdev);
310 vp_remove_vqs(vdev);
311 free_irq(pci_irq_vector(vp_dev->pci_dev, 0), vp_dev);
312 return err; 380 return err;
313} 381}
314 382
@@ -319,9 +387,15 @@ int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs,
319{ 387{
320 int err; 388 int err;
321 389
322 err = vp_find_vqs_msix(vdev, nvqs, vqs, callbacks, names, desc); 390 /* Try MSI-X with one vector per queue. */
391 err = vp_find_vqs_msix(vdev, nvqs, vqs, callbacks, names, true, desc);
323 if (!err) 392 if (!err)
324 return 0; 393 return 0;
394 /* Fallback: MSI-X with one vector for config, one shared for queues. */
395 err = vp_find_vqs_msix(vdev, nvqs, vqs, callbacks, names, false, desc);
396 if (!err)
397 return 0;
398 /* Finally fall back to regular interrupts. */
325 return vp_find_vqs_intx(vdev, nvqs, vqs, callbacks, names); 399 return vp_find_vqs_intx(vdev, nvqs, vqs, callbacks, names);
326} 400}
327 401
@@ -341,15 +415,16 @@ int vp_set_vq_affinity(struct virtqueue *vq, int cpu)
341{ 415{
342 struct virtio_device *vdev = vq->vdev; 416 struct virtio_device *vdev = vq->vdev;
343 struct virtio_pci_device *vp_dev = to_vp_device(vdev); 417 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
418 struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index];
419 struct cpumask *mask;
420 unsigned int irq;
344 421
345 if (!vq->callback) 422 if (!vq->callback)
346 return -EINVAL; 423 return -EINVAL;
347 424
348 if (vp_dev->pci_dev->msix_enabled) { 425 if (vp_dev->msix_enabled) {
349 int vec = vp_dev->msix_vector_map[vq->index]; 426 mask = vp_dev->msix_affinity_masks[info->msix_vector];
350 struct cpumask *mask = vp_dev->msix_affinity_masks[vec]; 427 irq = pci_irq_vector(vp_dev->pci_dev, info->msix_vector);
351 unsigned int irq = pci_irq_vector(vp_dev->pci_dev, vec);
352
353 if (cpu == -1) 428 if (cpu == -1)
354 irq_set_affinity_hint(irq, NULL); 429 irq_set_affinity_hint(irq, NULL);
355 else { 430 else {
@@ -364,12 +439,13 @@ int vp_set_vq_affinity(struct virtqueue *vq, int cpu)
364const struct cpumask *vp_get_vq_affinity(struct virtio_device *vdev, int index) 439const struct cpumask *vp_get_vq_affinity(struct virtio_device *vdev, int index)
365{ 440{
366 struct virtio_pci_device *vp_dev = to_vp_device(vdev); 441 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
367 unsigned int *map = vp_dev->msix_vector_map;
368 442
369 if (!map || map[index] == VIRTIO_MSI_NO_VECTOR) 443 if (!vp_dev->per_vq_vectors ||
444 vp_dev->vqs[index]->msix_vector == VIRTIO_MSI_NO_VECTOR)
370 return NULL; 445 return NULL;
371 446
372 return pci_irq_get_affinity(vp_dev->pci_dev, map[index]); 447 return pci_irq_get_affinity(vp_dev->pci_dev,
448 vp_dev->vqs[index]->msix_vector);
373} 449}
374 450
375#ifdef CONFIG_PM_SLEEP 451#ifdef CONFIG_PM_SLEEP
@@ -440,6 +516,8 @@ static int virtio_pci_probe(struct pci_dev *pci_dev,
440 vp_dev->vdev.dev.parent = &pci_dev->dev; 516 vp_dev->vdev.dev.parent = &pci_dev->dev;
441 vp_dev->vdev.dev.release = virtio_pci_release_dev; 517 vp_dev->vdev.dev.release = virtio_pci_release_dev;
442 vp_dev->pci_dev = pci_dev; 518 vp_dev->pci_dev = pci_dev;
519 INIT_LIST_HEAD(&vp_dev->virtqueues);
520 spin_lock_init(&vp_dev->lock);
443 521
444 /* enable the device */ 522 /* enable the device */
445 rc = pci_enable_device(pci_dev); 523 rc = pci_enable_device(pci_dev);
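
After the rework, vp_find_vqs() reads as a three-step fallback. Condensed, with placeholder helpers:

    static int find_vqs(struct virtio_device *vdev)
    {
            int err;

            /* Best: one vector for config changes plus one per virtqueue. */
            err = setup_msix(vdev, true /* per_vq_vectors */);
            if (!err)
                    return 0;

            /* Second best: two vectors, config plus one shared by all vqs. */
            err = setup_msix(vdev, false);
            if (!err)
                    return 0;

            /* Last resort: a single legacy (INTx) interrupt line. */
            return setup_intx(vdev);
    }
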
diff --git a/drivers/virtio/virtio_pci_common.h b/drivers/virtio/virtio_pci_common.h
index ac8c9d788964..e96334aec1e0 100644
--- a/drivers/virtio/virtio_pci_common.h
+++ b/drivers/virtio/virtio_pci_common.h
@@ -31,6 +31,17 @@
31#include <linux/highmem.h> 31#include <linux/highmem.h>
32#include <linux/spinlock.h> 32#include <linux/spinlock.h>
33 33
34struct virtio_pci_vq_info {
35 /* the actual virtqueue */
36 struct virtqueue *vq;
37
38 /* the list node for the virtqueues list */
39 struct list_head node;
40
41 /* MSI-X vector (or none) */
42 unsigned msix_vector;
43};
44
34/* Our device structure */ 45/* Our device structure */
35struct virtio_pci_device { 46struct virtio_pci_device {
36 struct virtio_device vdev; 47 struct virtio_device vdev;
@@ -64,25 +75,47 @@ struct virtio_pci_device {
64 /* the IO mapping for the PCI config space */ 75 /* the IO mapping for the PCI config space */
65 void __iomem *ioaddr; 76 void __iomem *ioaddr;
66 77
78 /* a list of queues so we can dispatch IRQs */
79 spinlock_t lock;
80 struct list_head virtqueues;
81
82 /* array of all queues for house-keeping */
83 struct virtio_pci_vq_info **vqs;
84
85 /* MSI-X support */
86 int msix_enabled;
87 int intx_enabled;
67 cpumask_var_t *msix_affinity_masks; 88 cpumask_var_t *msix_affinity_masks;
68 /* Name strings for interrupts. This size should be enough, 89 /* Name strings for interrupts. This size should be enough,
69 * and I'm too lazy to allocate each name separately. */ 90 * and I'm too lazy to allocate each name separately. */
70 char (*msix_names)[256]; 91 char (*msix_names)[256];
71 /* Total Number of MSI-X vectors (including per-VQ ones). */ 92 /* Number of available vectors */
72 int msix_vectors; 93 unsigned msix_vectors;
73 /* Map of per-VQ MSI-X vectors, may be NULL */ 94 /* Vectors allocated, excluding per-vq vectors if any */
74 unsigned *msix_vector_map; 95 unsigned msix_used_vectors;
96
 97	/* Whether we have a vector per vq */
98 bool per_vq_vectors;
75 99
76 struct virtqueue *(*setup_vq)(struct virtio_pci_device *vp_dev, 100 struct virtqueue *(*setup_vq)(struct virtio_pci_device *vp_dev,
101 struct virtio_pci_vq_info *info,
77 unsigned idx, 102 unsigned idx,
78 void (*callback)(struct virtqueue *vq), 103 void (*callback)(struct virtqueue *vq),
79 const char *name, 104 const char *name,
80 u16 msix_vec); 105 u16 msix_vec);
81 void (*del_vq)(struct virtqueue *vq); 106 void (*del_vq)(struct virtio_pci_vq_info *info);
82 107
83 u16 (*config_vector)(struct virtio_pci_device *vp_dev, u16 vector); 108 u16 (*config_vector)(struct virtio_pci_device *vp_dev, u16 vector);
84}; 109};
85 110
111/* Constants for MSI-X */
112/* Use first vector for configuration changes, second and the rest for
113 * virtqueues. Thus, we need at least 2 vectors for MSI-X. */
114enum {
115 VP_MSIX_CONFIG_VECTOR = 0,
116 VP_MSIX_VQ_VECTOR = 1,
117};
118
86/* Convert a generic virtio device to our structure */ 119/* Convert a generic virtio device to our structure */
87static struct virtio_pci_device *to_vp_device(struct virtio_device *vdev) 120static struct virtio_pci_device *to_vp_device(struct virtio_device *vdev)
88{ 121{
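
The VP_MSIX_* enum above restores the fixed vector layout: vector 0 always
carries configuration-change interrupts and queue vectors start at 1. A
hypothetical helper showing the mapping (the real code also hands
VIRTIO_MSI_NO_VECTOR to queues without callbacks, which this ignores):

/* Sketch: which MSI-X vector serves virtqueue 'idx'? */
static u16 vq_msix_vector(bool per_vq_vectors, unsigned idx)
{
	if (!per_vq_vectors)
		return VP_MSIX_VQ_VECTOR;	/* all queues share vector 1 */
	return VP_MSIX_VQ_VECTOR + idx;		/* vector 0 is config-only */
}
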
diff --git a/drivers/virtio/virtio_pci_legacy.c b/drivers/virtio/virtio_pci_legacy.c
index f7362c5fe18a..4bfa48fb1324 100644
--- a/drivers/virtio/virtio_pci_legacy.c
+++ b/drivers/virtio/virtio_pci_legacy.c
@@ -112,6 +112,7 @@ static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector)
112} 112}
113 113
114static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev, 114static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
115 struct virtio_pci_vq_info *info,
115 unsigned index, 116 unsigned index,
116 void (*callback)(struct virtqueue *vq), 117 void (*callback)(struct virtqueue *vq),
117 const char *name, 118 const char *name,
@@ -129,6 +130,8 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
129 if (!num || ioread32(vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN)) 130 if (!num || ioread32(vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN))
130 return ERR_PTR(-ENOENT); 131 return ERR_PTR(-ENOENT);
131 132
133 info->msix_vector = msix_vec;
134
132 /* create the vring */ 135 /* create the vring */
133 vq = vring_create_virtqueue(index, num, 136 vq = vring_create_virtqueue(index, num,
134 VIRTIO_PCI_VRING_ALIGN, &vp_dev->vdev, 137 VIRTIO_PCI_VRING_ALIGN, &vp_dev->vdev,
@@ -159,13 +162,14 @@ out_deactivate:
159 return ERR_PTR(err); 162 return ERR_PTR(err);
160} 163}
161 164
162static void del_vq(struct virtqueue *vq) 165static void del_vq(struct virtio_pci_vq_info *info)
163{ 166{
167 struct virtqueue *vq = info->vq;
164 struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev); 168 struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
165 169
166 iowrite16(vq->index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL); 170 iowrite16(vq->index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL);
167 171
168 if (vp_dev->pci_dev->msix_enabled) { 172 if (vp_dev->msix_enabled) {
169 iowrite16(VIRTIO_MSI_NO_VECTOR, 173 iowrite16(VIRTIO_MSI_NO_VECTOR,
170 vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR); 174 vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR);
171 /* Flush the write out to device */ 175 /* Flush the write out to device */
diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
index 7bc3004b840e..8978f109d2d7 100644
--- a/drivers/virtio/virtio_pci_modern.c
+++ b/drivers/virtio/virtio_pci_modern.c
@@ -293,6 +293,7 @@ static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector)
293} 293}
294 294
295static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev, 295static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
296 struct virtio_pci_vq_info *info,
296 unsigned index, 297 unsigned index,
297 void (*callback)(struct virtqueue *vq), 298 void (*callback)(struct virtqueue *vq),
298 const char *name, 299 const char *name,
@@ -322,6 +323,8 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
322 /* get offset of notification word for this vq */ 323 /* get offset of notification word for this vq */
323 off = vp_ioread16(&cfg->queue_notify_off); 324 off = vp_ioread16(&cfg->queue_notify_off);
324 325
326 info->msix_vector = msix_vec;
327
325 /* create the vring */ 328 /* create the vring */
326 vq = vring_create_virtqueue(index, num, 329 vq = vring_create_virtqueue(index, num,
327 SMP_CACHE_BYTES, &vp_dev->vdev, 330 SMP_CACHE_BYTES, &vp_dev->vdev,
@@ -405,13 +408,14 @@ static int vp_modern_find_vqs(struct virtio_device *vdev, unsigned nvqs,
405 return 0; 408 return 0;
406} 409}
407 410
408static void del_vq(struct virtqueue *vq) 411static void del_vq(struct virtio_pci_vq_info *info)
409{ 412{
413 struct virtqueue *vq = info->vq;
410 struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev); 414 struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
411 415
412 vp_iowrite16(vq->index, &vp_dev->common->queue_select); 416 vp_iowrite16(vq->index, &vp_dev->common->queue_select);
413 417
414 if (vp_dev->pci_dev->msix_enabled) { 418 if (vp_dev->msix_enabled) {
415 vp_iowrite16(VIRTIO_MSI_NO_VECTOR, 419 vp_iowrite16(VIRTIO_MSI_NO_VECTOR,
416 &vp_dev->common->queue_msix_vector); 420 &vp_dev->common->queue_msix_vector);
417 /* Flush the write out to device */ 421 /* Flush the write out to device */
diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c
index 1f4733b80c87..f3b089b7c0b6 100644
--- a/drivers/xen/xenbus/xenbus_dev_frontend.c
+++ b/drivers/xen/xenbus/xenbus_dev_frontend.c
@@ -442,8 +442,10 @@ static int xenbus_write_transaction(unsigned msg_type,
442 return xenbus_command_reply(u, XS_ERROR, "ENOENT"); 442 return xenbus_command_reply(u, XS_ERROR, "ENOENT");
443 443
444 rc = xenbus_dev_request_and_reply(&u->u.msg, u); 444 rc = xenbus_dev_request_and_reply(&u->u.msg, u);
445 if (rc) 445 if (rc && trans) {
446 list_del(&trans->list);
446 kfree(trans); 447 kfree(trans);
448 }
447 449
448out: 450out:
449 return rc; 451 return rc;
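
The xenbus fix matters because the transaction holder is linked into the
per-user list before the request is sent; kfree()ing it on failure without
unlinking left a dangling list node. A sketch of the corrected error path,
assuming the holder embeds its list node as in this file:

/* Sketch: never free an object that is still on a list. */
static void drop_failed_transaction(struct xenbus_transaction_holder *trans,
				    int rc)
{
	if (rc && trans) {
		list_del(&trans->list);	/* unlink from the per-user list... */
		kfree(trans);		/* ...only then free it */
	}
}
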
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 29b7fc28c607..c4115901d906 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -1259,7 +1259,7 @@ struct btrfs_root {
1259 atomic_t will_be_snapshoted; 1259 atomic_t will_be_snapshoted;
1260 1260
1261 /* For qgroup metadata space reserve */ 1261 /* For qgroup metadata space reserve */
1262 atomic_t qgroup_meta_rsv; 1262 atomic64_t qgroup_meta_rsv;
1263}; 1263};
1264static inline u32 btrfs_inode_sectorsize(const struct inode *inode) 1264static inline u32 btrfs_inode_sectorsize(const struct inode *inode)
1265{ 1265{
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 08b74daf35d0..eb1ee7b6f532 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -1342,7 +1342,7 @@ static void __setup_root(struct btrfs_root *root, struct btrfs_fs_info *fs_info,
1342 atomic_set(&root->orphan_inodes, 0); 1342 atomic_set(&root->orphan_inodes, 0);
1343 atomic_set(&root->refs, 1); 1343 atomic_set(&root->refs, 1);
1344 atomic_set(&root->will_be_snapshoted, 0); 1344 atomic_set(&root->will_be_snapshoted, 0);
1345 atomic_set(&root->qgroup_meta_rsv, 0); 1345 atomic64_set(&root->qgroup_meta_rsv, 0);
1346 root->log_transid = 0; 1346 root->log_transid = 0;
1347 root->log_transid_committed = -1; 1347 root->log_transid_committed = -1;
1348 root->last_log_commit = 0; 1348 root->last_log_commit = 0;
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 8df797432740..27fdb250b446 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -2584,26 +2584,36 @@ static void end_bio_extent_readpage(struct bio *bio)
2584 2584
2585 if (tree->ops) { 2585 if (tree->ops) {
2586 ret = tree->ops->readpage_io_failed_hook(page, mirror); 2586 ret = tree->ops->readpage_io_failed_hook(page, mirror);
2587 if (!ret && !bio->bi_error) 2587 if (ret == -EAGAIN) {
2588 uptodate = 1; 2588 /*
2589 } else { 2589 * Data inode's readpage_io_failed_hook() always
2590 * returns -EAGAIN.
2591 *
2592 * The generic bio_readpage_error handles errors
2593 * the following way: If possible, new read
2594 * requests are created and submitted and will
2595 * end up in end_bio_extent_readpage as well (if
2596 * we're lucky, not in the !uptodate case). In
2597 * that case it returns 0 and we just go on with
2598 * the next page in our bio. If it can't handle
2599 * the error it will return -EIO and we remain
2600 * responsible for that page.
2601 */
2602 ret = bio_readpage_error(bio, offset, page,
2603 start, end, mirror);
2604 if (ret == 0) {
2605 uptodate = !bio->bi_error;
2606 offset += len;
2607 continue;
2608 }
2609 }
2610
2590 /* 2611 /*
2591 * The generic bio_readpage_error handles errors the 2612 * metadata's readpage_io_failed_hook() always returns
2592 * following way: If possible, new read requests are 2613 * -EIO and fixes nothing. -EIO is also returned if
2593 * created and submitted and will end up in 2614 * data inode error could not be fixed.
2594 * end_bio_extent_readpage as well (if we're lucky, not
2595 * in the !uptodate case). In that case it returns 0 and
2596 * we just go on with the next page in our bio. If it
2597 * can't handle the error it will return -EIO and we
2598 * remain responsible for that page.
2599 */ 2615 */
2600 ret = bio_readpage_error(bio, offset, page, start, end, 2616 ASSERT(ret == -EIO);
2601 mirror);
2602 if (ret == 0) {
2603 uptodate = !bio->bi_error;
2604 offset += len;
2605 continue;
2606 }
2607 } 2617 }
2608readpage_ok: 2618readpage_ok:
2609 if (likely(uptodate)) { 2619 if (likely(uptodate)) {
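
After this change the hook's return value is a small protocol: a data inode's
readpage_io_failed_hook() returns -EAGAIN, meaning "the generic repair path
may fix this", while metadata's returns -EIO, meaning "nothing can be fixed".
A sketch of the resulting dispatch, using the names from the hunk:

/* Sketch: dispatch on the readpage_io_failed_hook() return contract. */
static int handle_readpage_failure(struct bio *bio, int hook_ret, u64 offset,
				   struct page *page, u64 start, u64 end,
				   int mirror)
{
	if (hook_ret == -EAGAIN)	/* data inode: repair may be possible */
		return bio_readpage_error(bio, offset, page, start, end,
					  mirror);
	ASSERT(hook_ret == -EIO);	/* metadata, or unfixable data error */
	return -EIO;
}
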
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 231503935652..5e71f1ea3391 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -7910,7 +7910,6 @@ struct btrfs_retry_complete {
7910static void btrfs_retry_endio_nocsum(struct bio *bio) 7910static void btrfs_retry_endio_nocsum(struct bio *bio)
7911{ 7911{
7912 struct btrfs_retry_complete *done = bio->bi_private; 7912 struct btrfs_retry_complete *done = bio->bi_private;
7913 struct inode *inode;
7914 struct bio_vec *bvec; 7913 struct bio_vec *bvec;
7915 int i; 7914 int i;
7916 7915
@@ -7918,12 +7917,12 @@ static void btrfs_retry_endio_nocsum(struct bio *bio)
7918 goto end; 7917 goto end;
7919 7918
7920 ASSERT(bio->bi_vcnt == 1); 7919 ASSERT(bio->bi_vcnt == 1);
7921 inode = bio->bi_io_vec->bv_page->mapping->host; 7920 ASSERT(bio->bi_io_vec->bv_len == btrfs_inode_sectorsize(done->inode));
7922 ASSERT(bio->bi_io_vec->bv_len == btrfs_inode_sectorsize(inode));
7923 7921
7924 done->uptodate = 1; 7922 done->uptodate = 1;
7925 bio_for_each_segment_all(bvec, bio, i) 7923 bio_for_each_segment_all(bvec, bio, i)
7926 clean_io_failure(BTRFS_I(done->inode), done->start, bvec->bv_page, 0); 7924 clean_io_failure(BTRFS_I(done->inode), done->start,
7925 bvec->bv_page, 0);
7927end: 7926end:
7928 complete(&done->done); 7927 complete(&done->done);
7929 bio_put(bio); 7928 bio_put(bio);
@@ -7973,8 +7972,10 @@ next_block_or_try_again:
7973 7972
7974 start += sectorsize; 7973 start += sectorsize;
7975 7974
7976 if (nr_sectors--) { 7975 nr_sectors--;
7976 if (nr_sectors) {
7977 pgoff += sectorsize; 7977 pgoff += sectorsize;
7978 ASSERT(pgoff < PAGE_SIZE);
7978 goto next_block_or_try_again; 7979 goto next_block_or_try_again;
7979 } 7980 }
7980 } 7981 }
@@ -7986,9 +7987,7 @@ static void btrfs_retry_endio(struct bio *bio)
7986{ 7987{
7987 struct btrfs_retry_complete *done = bio->bi_private; 7988 struct btrfs_retry_complete *done = bio->bi_private;
7988 struct btrfs_io_bio *io_bio = btrfs_io_bio(bio); 7989 struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
7989 struct inode *inode;
7990 struct bio_vec *bvec; 7990 struct bio_vec *bvec;
7991 u64 start;
7992 int uptodate; 7991 int uptodate;
7993 int ret; 7992 int ret;
7994 int i; 7993 int i;
@@ -7998,11 +7997,8 @@ static void btrfs_retry_endio(struct bio *bio)
7998 7997
7999 uptodate = 1; 7998 uptodate = 1;
8000 7999
8001 start = done->start;
8002
8003 ASSERT(bio->bi_vcnt == 1); 8000 ASSERT(bio->bi_vcnt == 1);
8004 inode = bio->bi_io_vec->bv_page->mapping->host; 8001 ASSERT(bio->bi_io_vec->bv_len == btrfs_inode_sectorsize(done->inode));
8005 ASSERT(bio->bi_io_vec->bv_len == btrfs_inode_sectorsize(inode));
8006 8002
8007 bio_for_each_segment_all(bvec, bio, i) { 8003 bio_for_each_segment_all(bvec, bio, i) {
8008 ret = __readpage_endio_check(done->inode, io_bio, i, 8004 ret = __readpage_endio_check(done->inode, io_bio, i,
@@ -8080,8 +8076,10 @@ next:
8080 8076
8081 ASSERT(nr_sectors); 8077 ASSERT(nr_sectors);
8082 8078
8083 if (--nr_sectors) { 8079 nr_sectors--;
8080 if (nr_sectors) {
8084 pgoff += sectorsize; 8081 pgoff += sectorsize;
8082 ASSERT(pgoff < PAGE_SIZE);
8085 goto next_block; 8083 goto next_block;
8086 } 8084 }
8087 } 8085 }
@@ -10523,9 +10521,9 @@ out_inode:
10523} 10521}
10524 10522
10525__attribute__((const)) 10523__attribute__((const))
10526static int dummy_readpage_io_failed_hook(struct page *page, int failed_mirror) 10524static int btrfs_readpage_io_failed_hook(struct page *page, int failed_mirror)
10527{ 10525{
10528 return 0; 10526 return -EAGAIN;
10529} 10527}
10530 10528
10531static const struct inode_operations btrfs_dir_inode_operations = { 10529static const struct inode_operations btrfs_dir_inode_operations = {
@@ -10570,7 +10568,7 @@ static const struct extent_io_ops btrfs_extent_io_ops = {
10570 .submit_bio_hook = btrfs_submit_bio_hook, 10568 .submit_bio_hook = btrfs_submit_bio_hook,
10571 .readpage_end_io_hook = btrfs_readpage_end_io_hook, 10569 .readpage_end_io_hook = btrfs_readpage_end_io_hook,
10572 .merge_bio_hook = btrfs_merge_bio_hook, 10570 .merge_bio_hook = btrfs_merge_bio_hook,
10573 .readpage_io_failed_hook = dummy_readpage_io_failed_hook, 10571 .readpage_io_failed_hook = btrfs_readpage_io_failed_hook,
10574 10572
10575 /* optional callbacks */ 10573 /* optional callbacks */
10576 .fill_delalloc = run_delalloc_range, 10574 .fill_delalloc = run_delalloc_range,
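
Both retry loops had the same off-by-one: "if (nr_sectors--)" tests the value
before the decrement, so the body ran once more than there were sectors.
Decrementing first and then testing gives the intended shape; the new ASSERT
additionally catches a page offset walking off the page. A sketch
(process_sector() is a hypothetical stand-in for the retry logic):

/* Sketch of the corrected loop: consume a sector, then decide. */
while (1) {
	process_sector(pgoff);
	start += sectorsize;
	nr_sectors--;			/* the sector above is now done... */
	if (!nr_sectors)		/* ...stop when none remain */
		break;
	pgoff += sectorsize;
	ASSERT(pgoff < PAGE_SIZE);	/* never walk past the page */
}
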
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index a5da750c1087..a59801dc2a34 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -2948,20 +2948,20 @@ int btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
2948 ret = qgroup_reserve(root, num_bytes, enforce); 2948 ret = qgroup_reserve(root, num_bytes, enforce);
2949 if (ret < 0) 2949 if (ret < 0)
2950 return ret; 2950 return ret;
2951 atomic_add(num_bytes, &root->qgroup_meta_rsv); 2951 atomic64_add(num_bytes, &root->qgroup_meta_rsv);
2952 return ret; 2952 return ret;
2953} 2953}
2954 2954
2955void btrfs_qgroup_free_meta_all(struct btrfs_root *root) 2955void btrfs_qgroup_free_meta_all(struct btrfs_root *root)
2956{ 2956{
2957 struct btrfs_fs_info *fs_info = root->fs_info; 2957 struct btrfs_fs_info *fs_info = root->fs_info;
2958 int reserved; 2958 u64 reserved;
2959 2959
2960 if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) || 2960 if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
2961 !is_fstree(root->objectid)) 2961 !is_fstree(root->objectid))
2962 return; 2962 return;
2963 2963
2964 reserved = atomic_xchg(&root->qgroup_meta_rsv, 0); 2964 reserved = atomic64_xchg(&root->qgroup_meta_rsv, 0);
2965 if (reserved == 0) 2965 if (reserved == 0)
2966 return; 2966 return;
2967 btrfs_qgroup_free_refroot(fs_info, root->objectid, reserved); 2967 btrfs_qgroup_free_refroot(fs_info, root->objectid, reserved);
@@ -2976,8 +2976,8 @@ void btrfs_qgroup_free_meta(struct btrfs_root *root, int num_bytes)
2976 return; 2976 return;
2977 2977
2978 BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize)); 2978 BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize));
2979 WARN_ON(atomic_read(&root->qgroup_meta_rsv) < num_bytes); 2979 WARN_ON(atomic64_read(&root->qgroup_meta_rsv) < num_bytes);
2980 atomic_sub(num_bytes, &root->qgroup_meta_rsv); 2980 atomic64_sub(num_bytes, &root->qgroup_meta_rsv);
2981 btrfs_qgroup_free_refroot(fs_info, root->objectid, num_bytes); 2981 btrfs_qgroup_free_refroot(fs_info, root->objectid, num_bytes);
2982} 2982}
2983 2983
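
qgroup_meta_rsv counts reserved bytes, and on a busy filesystem that total can
exceed what a 32-bit atomic_t holds, wrapping past 2 GiB signed. Every
accessor moves to the 64-bit variants; the pattern, in miniature:

/* Sketch: a byte counter that must not wrap needs atomic64_t. */
static atomic64_t rsv = ATOMIC64_INIT(0);

static void reserve_bytes(u64 bytes)
{
	atomic64_add(bytes, &rsv);
}

static void release_bytes(u64 bytes)
{
	WARN_ON(atomic64_read(&rsv) < bytes);
	atomic64_sub(bytes, &rsv);
}

static u64 drain_bytes(void)
{
	return atomic64_xchg(&rsv, 0);	/* as in btrfs_qgroup_free_meta_all */
}
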
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index 456c8901489b..a60d5bfb8a49 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -6305,8 +6305,13 @@ long btrfs_ioctl_send(struct file *mnt_file, void __user *arg_)
6305 goto out; 6305 goto out;
6306 } 6306 }
6307 6307
6308 /*
6309 * Check that we don't overflow at later allocations: we request
6310 * clone_sources_count + 1 items, and compare to unsigned long inside
6311 * access_ok.
6312 */
6308 if (arg->clone_sources_count > 6313 if (arg->clone_sources_count >
6309 ULLONG_MAX / sizeof(*arg->clone_sources)) { 6314 ULONG_MAX / sizeof(struct clone_root) - 1) {
6310 ret = -EINVAL; 6315 ret = -EINVAL;
6311 goto out; 6316 goto out;
6312 } 6317 }
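
The old bound used ULLONG_MAX, but the later allocation asks for
clone_sources_count + 1 items and access_ok() compares against an unsigned
long, so the check has to be one item tighter and in unsigned long range. As a
standalone predicate (a sketch, not the kernel's wording):

/* Sketch: ensure (count + 1) * item_size fits in an unsigned long. */
static bool clone_count_fits(u64 count, size_t item_size)
{
	return count <= ULONG_MAX / item_size - 1;
}
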
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index da687dc79cce..9530a333d302 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -549,16 +549,19 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
549 case Opt_ssd: 549 case Opt_ssd:
550 btrfs_set_and_info(info, SSD, 550 btrfs_set_and_info(info, SSD,
551 "use ssd allocation scheme"); 551 "use ssd allocation scheme");
552 btrfs_clear_opt(info->mount_opt, NOSSD);
552 break; 553 break;
553 case Opt_ssd_spread: 554 case Opt_ssd_spread:
554 btrfs_set_and_info(info, SSD_SPREAD, 555 btrfs_set_and_info(info, SSD_SPREAD,
555 "use spread ssd allocation scheme"); 556 "use spread ssd allocation scheme");
556 btrfs_set_opt(info->mount_opt, SSD); 557 btrfs_set_opt(info->mount_opt, SSD);
558 btrfs_clear_opt(info->mount_opt, NOSSD);
557 break; 559 break;
558 case Opt_nossd: 560 case Opt_nossd:
559 btrfs_set_and_info(info, NOSSD, 561 btrfs_set_and_info(info, NOSSD,
560 "not using ssd allocation scheme"); 562 "not using ssd allocation scheme");
561 btrfs_clear_opt(info->mount_opt, SSD); 563 btrfs_clear_opt(info->mount_opt, SSD);
564 btrfs_clear_opt(info->mount_opt, SSD_SPREAD);
562 break; 565 break;
563 case Opt_barrier: 566 case Opt_barrier:
564 btrfs_clear_and_info(info, NOBARRIER, 567 btrfs_clear_and_info(info, NOBARRIER,
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 73d56eef5e60..ab8a66d852f9 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -6213,7 +6213,7 @@ int btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
6213 for (dev_nr = 0; dev_nr < total_devs; dev_nr++) { 6213 for (dev_nr = 0; dev_nr < total_devs; dev_nr++) {
6214 dev = bbio->stripes[dev_nr].dev; 6214 dev = bbio->stripes[dev_nr].dev;
6215 if (!dev || !dev->bdev || 6215 if (!dev || !dev->bdev ||
6216 (bio_op(bio) == REQ_OP_WRITE && !dev->writeable)) { 6216 (bio_op(first_bio) == REQ_OP_WRITE && !dev->writeable)) {
6217 bbio_error(bbio, first_bio, logical); 6217 bbio_error(bbio, first_bio, logical);
6218 continue; 6218 continue;
6219 } 6219 }
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 15e1db8738ae..dd3f5fabfdf6 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -972,6 +972,86 @@ out:
972 return rc; 972 return rc;
973} 973}
974 974
975ssize_t cifs_file_copychunk_range(unsigned int xid,
976 struct file *src_file, loff_t off,
977 struct file *dst_file, loff_t destoff,
978 size_t len, unsigned int flags)
979{
980 struct inode *src_inode = file_inode(src_file);
981 struct inode *target_inode = file_inode(dst_file);
982 struct cifsFileInfo *smb_file_src;
983 struct cifsFileInfo *smb_file_target;
984 struct cifs_tcon *src_tcon;
985 struct cifs_tcon *target_tcon;
986 ssize_t rc;
987
988 cifs_dbg(FYI, "copychunk range\n");
989
990 if (src_inode == target_inode) {
991 rc = -EINVAL;
992 goto out;
993 }
994
995 if (!src_file->private_data || !dst_file->private_data) {
996 rc = -EBADF;
997 cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
998 goto out;
999 }
1000
1001 rc = -EXDEV;
1002 smb_file_target = dst_file->private_data;
1003 smb_file_src = src_file->private_data;
1004 src_tcon = tlink_tcon(smb_file_src->tlink);
1005 target_tcon = tlink_tcon(smb_file_target->tlink);
1006
1007 if (src_tcon->ses != target_tcon->ses) {
1008 cifs_dbg(VFS, "source and target of copy not on same server\n");
1009 goto out;
1010 }
1011
1012 /*
1013	 * Note: the cifs case is easier than btrfs since the server is
1014	 * responsible for checking open modes and file types, and if it
1015	 * wants, it could even support a copy of a range where source == target
1016 */
1017 lock_two_nondirectories(target_inode, src_inode);
1018
1019 cifs_dbg(FYI, "about to flush pages\n");
1020	/* should we flush the first and last pages first? */
1021 truncate_inode_pages(&target_inode->i_data, 0);
1022
1023 if (target_tcon->ses->server->ops->copychunk_range)
1024 rc = target_tcon->ses->server->ops->copychunk_range(xid,
1025 smb_file_src, smb_file_target, off, len, destoff);
1026 else
1027 rc = -EOPNOTSUPP;
1028
1029 /* force revalidate of size and timestamps of target file now
1030 * that target is updated on the server
1031 */
1032 CIFS_I(target_inode)->time = 0;
1033 /* although unlocking in the reverse order from locking is not
1034	 * strictly necessary here, it is a little cleaner to be consistent
1035 */
1036 unlock_two_nondirectories(src_inode, target_inode);
1037
1038out:
1039 return rc;
1040}
1041
1042static ssize_t cifs_copy_file_range(struct file *src_file, loff_t off,
1043 struct file *dst_file, loff_t destoff,
1044 size_t len, unsigned int flags)
1045{
1046 unsigned int xid = get_xid();
1047 ssize_t rc;
1048
1049 rc = cifs_file_copychunk_range(xid, src_file, off, dst_file, destoff,
1050 len, flags);
1051 free_xid(xid);
1052 return rc;
1053}
1054
975const struct file_operations cifs_file_ops = { 1055const struct file_operations cifs_file_ops = {
976 .read_iter = cifs_loose_read_iter, 1056 .read_iter = cifs_loose_read_iter,
977 .write_iter = cifs_file_write_iter, 1057 .write_iter = cifs_file_write_iter,
@@ -984,6 +1064,7 @@ const struct file_operations cifs_file_ops = {
984 .splice_read = generic_file_splice_read, 1064 .splice_read = generic_file_splice_read,
985 .llseek = cifs_llseek, 1065 .llseek = cifs_llseek,
986 .unlocked_ioctl = cifs_ioctl, 1066 .unlocked_ioctl = cifs_ioctl,
1067 .copy_file_range = cifs_copy_file_range,
987 .clone_file_range = cifs_clone_file_range, 1068 .clone_file_range = cifs_clone_file_range,
988 .setlease = cifs_setlease, 1069 .setlease = cifs_setlease,
989 .fallocate = cifs_fallocate, 1070 .fallocate = cifs_fallocate,
@@ -1001,6 +1082,7 @@ const struct file_operations cifs_file_strict_ops = {
1001 .splice_read = generic_file_splice_read, 1082 .splice_read = generic_file_splice_read,
1002 .llseek = cifs_llseek, 1083 .llseek = cifs_llseek,
1003 .unlocked_ioctl = cifs_ioctl, 1084 .unlocked_ioctl = cifs_ioctl,
1085 .copy_file_range = cifs_copy_file_range,
1004 .clone_file_range = cifs_clone_file_range, 1086 .clone_file_range = cifs_clone_file_range,
1005 .setlease = cifs_setlease, 1087 .setlease = cifs_setlease,
1006 .fallocate = cifs_fallocate, 1088 .fallocate = cifs_fallocate,
@@ -1018,6 +1100,7 @@ const struct file_operations cifs_file_direct_ops = {
1018 .mmap = cifs_file_mmap, 1100 .mmap = cifs_file_mmap,
1019 .splice_read = generic_file_splice_read, 1101 .splice_read = generic_file_splice_read,
1020 .unlocked_ioctl = cifs_ioctl, 1102 .unlocked_ioctl = cifs_ioctl,
1103 .copy_file_range = cifs_copy_file_range,
1021 .clone_file_range = cifs_clone_file_range, 1104 .clone_file_range = cifs_clone_file_range,
1022 .llseek = cifs_llseek, 1105 .llseek = cifs_llseek,
1023 .setlease = cifs_setlease, 1106 .setlease = cifs_setlease,
@@ -1035,6 +1118,7 @@ const struct file_operations cifs_file_nobrl_ops = {
1035 .splice_read = generic_file_splice_read, 1118 .splice_read = generic_file_splice_read,
1036 .llseek = cifs_llseek, 1119 .llseek = cifs_llseek,
1037 .unlocked_ioctl = cifs_ioctl, 1120 .unlocked_ioctl = cifs_ioctl,
1121 .copy_file_range = cifs_copy_file_range,
1038 .clone_file_range = cifs_clone_file_range, 1122 .clone_file_range = cifs_clone_file_range,
1039 .setlease = cifs_setlease, 1123 .setlease = cifs_setlease,
1040 .fallocate = cifs_fallocate, 1124 .fallocate = cifs_fallocate,
@@ -1051,6 +1135,7 @@ const struct file_operations cifs_file_strict_nobrl_ops = {
1051 .splice_read = generic_file_splice_read, 1135 .splice_read = generic_file_splice_read,
1052 .llseek = cifs_llseek, 1136 .llseek = cifs_llseek,
1053 .unlocked_ioctl = cifs_ioctl, 1137 .unlocked_ioctl = cifs_ioctl,
1138 .copy_file_range = cifs_copy_file_range,
1054 .clone_file_range = cifs_clone_file_range, 1139 .clone_file_range = cifs_clone_file_range,
1055 .setlease = cifs_setlease, 1140 .setlease = cifs_setlease,
1056 .fallocate = cifs_fallocate, 1141 .fallocate = cifs_fallocate,
@@ -1067,6 +1152,7 @@ const struct file_operations cifs_file_direct_nobrl_ops = {
1067 .mmap = cifs_file_mmap, 1152 .mmap = cifs_file_mmap,
1068 .splice_read = generic_file_splice_read, 1153 .splice_read = generic_file_splice_read,
1069 .unlocked_ioctl = cifs_ioctl, 1154 .unlocked_ioctl = cifs_ioctl,
1155 .copy_file_range = cifs_copy_file_range,
1070 .clone_file_range = cifs_clone_file_range, 1156 .clone_file_range = cifs_clone_file_range,
1071 .llseek = cifs_llseek, 1157 .llseek = cifs_llseek,
1072 .setlease = cifs_setlease, 1158 .setlease = cifs_setlease,
@@ -1078,6 +1164,7 @@ const struct file_operations cifs_dir_ops = {
1078 .release = cifs_closedir, 1164 .release = cifs_closedir,
1079 .read = generic_read_dir, 1165 .read = generic_read_dir,
1080 .unlocked_ioctl = cifs_ioctl, 1166 .unlocked_ioctl = cifs_ioctl,
1167 .copy_file_range = cifs_copy_file_range,
1081 .clone_file_range = cifs_clone_file_range, 1168 .clone_file_range = cifs_clone_file_range,
1082 .llseek = generic_file_llseek, 1169 .llseek = generic_file_llseek,
1083}; 1170};
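
Wiring .copy_file_range into every file_operations table means userspace can
drive server-side copies with the plain syscall instead of the CIFS-specific
ioctl. A minimal user-space example, assuming glibc 2.27+ for the wrapper
(paths and the 1 MiB length are illustrative):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int src = open("/mnt/cifs/a", O_RDONLY);
	int dst = open("/mnt/cifs/b", O_WRONLY | O_CREAT, 0644);

	if (src < 0 || dst < 0)
		return 1;
	/* The kernel forwards this to ->copy_file_range, i.e. copychunk. */
	ssize_t n = copy_file_range(src, NULL, dst, NULL, 1 << 20, 0);

	printf("copied %zd bytes\n", n);
	return n < 0;
}
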
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index da717fee3026..30bf89b1fd9a 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -139,6 +139,11 @@ extern ssize_t cifs_listxattr(struct dentry *, char *, size_t);
139# define cifs_listxattr NULL 139# define cifs_listxattr NULL
140#endif 140#endif
141 141
142extern ssize_t cifs_file_copychunk_range(unsigned int xid,
143 struct file *src_file, loff_t off,
144 struct file *dst_file, loff_t destoff,
145 size_t len, unsigned int flags);
146
142extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg); 147extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
143#ifdef CONFIG_CIFS_NFSD_EXPORT 148#ifdef CONFIG_CIFS_NFSD_EXPORT
144extern const struct export_operations cifs_export_ops; 149extern const struct export_operations cifs_export_ops;
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index d42dd3288647..37f5a41cc50c 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -243,6 +243,7 @@ struct smb_version_operations {
243 /* verify the message */ 243 /* verify the message */
244 int (*check_message)(char *, unsigned int, struct TCP_Server_Info *); 244 int (*check_message)(char *, unsigned int, struct TCP_Server_Info *);
245 bool (*is_oplock_break)(char *, struct TCP_Server_Info *); 245 bool (*is_oplock_break)(char *, struct TCP_Server_Info *);
246 int (*handle_cancelled_mid)(char *, struct TCP_Server_Info *);
246 void (*downgrade_oplock)(struct TCP_Server_Info *, 247 void (*downgrade_oplock)(struct TCP_Server_Info *,
247 struct cifsInodeInfo *, bool); 248 struct cifsInodeInfo *, bool);
248 /* process transaction2 response */ 249 /* process transaction2 response */
@@ -407,9 +408,10 @@ struct smb_version_operations {
407 char * (*create_lease_buf)(u8 *, u8); 408 char * (*create_lease_buf)(u8 *, u8);
408 /* parse lease context buffer and return oplock/epoch info */ 409 /* parse lease context buffer and return oplock/epoch info */
409 __u8 (*parse_lease_buf)(void *, unsigned int *); 410 __u8 (*parse_lease_buf)(void *, unsigned int *);
410 int (*clone_range)(const unsigned int, struct cifsFileInfo *src_file, 411 ssize_t (*copychunk_range)(const unsigned int,
411 struct cifsFileInfo *target_file, u64 src_off, u64 len, 412 struct cifsFileInfo *src_file,
412 u64 dest_off); 413 struct cifsFileInfo *target_file,
414 u64 src_off, u64 len, u64 dest_off);
413 int (*duplicate_extents)(const unsigned int, struct cifsFileInfo *src, 415 int (*duplicate_extents)(const unsigned int, struct cifsFileInfo *src,
414 struct cifsFileInfo *target_file, u64 src_off, u64 len, 416 struct cifsFileInfo *target_file, u64 src_off, u64 len,
415 u64 dest_off); 417 u64 dest_off);
@@ -946,7 +948,6 @@ struct cifs_tcon {
946 bool use_persistent:1; /* use persistent instead of durable handles */ 948 bool use_persistent:1; /* use persistent instead of durable handles */
947#ifdef CONFIG_CIFS_SMB2 949#ifdef CONFIG_CIFS_SMB2
948 bool print:1; /* set if connection to printer share */ 950 bool print:1; /* set if connection to printer share */
949 bool bad_network_name:1; /* set if ret status STATUS_BAD_NETWORK_NAME */
950 __le32 capabilities; 951 __le32 capabilities;
951 __u32 share_flags; 952 __u32 share_flags;
952 __u32 maximal_access; 953 __u32 maximal_access;
@@ -1343,6 +1344,7 @@ struct mid_q_entry {
1343 void *callback_data; /* general purpose pointer for callback */ 1344 void *callback_data; /* general purpose pointer for callback */
1344 void *resp_buf; /* pointer to received SMB header */ 1345 void *resp_buf; /* pointer to received SMB header */
1345 int mid_state; /* wish this were enum but can not pass to wait_event */ 1346 int mid_state; /* wish this were enum but can not pass to wait_event */
1347 unsigned int mid_flags;
1346 __le16 command; /* smb command code */ 1348 __le16 command; /* smb command code */
1347 bool large_buf:1; /* if valid response, is pointer to large buf */ 1349 bool large_buf:1; /* if valid response, is pointer to large buf */
1348 bool multiRsp:1; /* multiple trans2 responses for one request */ 1350 bool multiRsp:1; /* multiple trans2 responses for one request */
@@ -1350,6 +1352,12 @@ struct mid_q_entry {
1350 bool decrypted:1; /* decrypted entry */ 1352 bool decrypted:1; /* decrypted entry */
1351}; 1353};
1352 1354
1355struct close_cancelled_open {
1356 struct cifs_fid fid;
1357 struct cifs_tcon *tcon;
1358 struct work_struct work;
1359};
1360
1353/* Make code in transport.c a little cleaner by moving 1361/* Make code in transport.c a little cleaner by moving
1354 update of optional stats into function below */ 1362 update of optional stats into function below */
1355#ifdef CONFIG_CIFS_STATS2 1363#ifdef CONFIG_CIFS_STATS2
@@ -1481,6 +1489,9 @@ static inline void free_dfs_info_array(struct dfs_info3_param *param,
1481#define MID_RESPONSE_MALFORMED 0x10 1489#define MID_RESPONSE_MALFORMED 0x10
1482#define MID_SHUTDOWN 0x20 1490#define MID_SHUTDOWN 0x20
1483 1491
1492/* Flags */
1493#define MID_WAIT_CANCELLED 1 /* Cancelled while waiting for response */
1494
1484/* Types of response buffer returned from SendReceive2 */ 1495/* Types of response buffer returned from SendReceive2 */
1485#define CIFS_NO_BUFFER 0 /* Response buffer not returned */ 1496#define CIFS_NO_BUFFER 0 /* Response buffer not returned */
1486#define CIFS_SMALL_BUFFER 1 1497#define CIFS_SMALL_BUFFER 1
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 066950671929..5d21f00ae341 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -1428,6 +1428,8 @@ cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
1428 1428
1429 length = cifs_discard_remaining_data(server); 1429 length = cifs_discard_remaining_data(server);
1430 dequeue_mid(mid, rdata->result); 1430 dequeue_mid(mid, rdata->result);
1431 mid->resp_buf = server->smallbuf;
1432 server->smallbuf = NULL;
1431 return length; 1433 return length;
1432} 1434}
1433 1435
@@ -1541,6 +1543,8 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
1541 return cifs_readv_discard(server, mid); 1543 return cifs_readv_discard(server, mid);
1542 1544
1543 dequeue_mid(mid, false); 1545 dequeue_mid(mid, false);
1546 mid->resp_buf = server->smallbuf;
1547 server->smallbuf = NULL;
1544 return length; 1548 return length;
1545} 1549}
1546 1550
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 9ae695ae3ed7..d82467cfb0e2 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -904,10 +904,19 @@ cifs_demultiplex_thread(void *p)
904 904
905 server->lstrp = jiffies; 905 server->lstrp = jiffies;
906 if (mid_entry != NULL) { 906 if (mid_entry != NULL) {
907 if ((mid_entry->mid_flags & MID_WAIT_CANCELLED) &&
908 mid_entry->mid_state == MID_RESPONSE_RECEIVED &&
909 server->ops->handle_cancelled_mid)
910 server->ops->handle_cancelled_mid(
911 mid_entry->resp_buf,
912 server);
913
907 if (!mid_entry->multiRsp || mid_entry->multiEnd) 914 if (!mid_entry->multiRsp || mid_entry->multiEnd)
908 mid_entry->callback(mid_entry); 915 mid_entry->callback(mid_entry);
909 } else if (!server->ops->is_oplock_break || 916 } else if (server->ops->is_oplock_break &&
910 !server->ops->is_oplock_break(buf, server)) { 917 server->ops->is_oplock_break(buf, server)) {
918 cifs_dbg(FYI, "Received oplock break\n");
919 } else {
911 cifs_dbg(VFS, "No task to wake, unknown frame received! NumMids %d\n", 920 cifs_dbg(VFS, "No task to wake, unknown frame received! NumMids %d\n",
912 atomic_read(&midCount)); 921 atomic_read(&midCount));
913 cifs_dump_mem("Received Data is: ", buf, 922 cifs_dump_mem("Received Data is: ", buf,
@@ -3744,6 +3753,9 @@ try_mount_again:
3744 if (IS_ERR(tcon)) { 3753 if (IS_ERR(tcon)) {
3745 rc = PTR_ERR(tcon); 3754 rc = PTR_ERR(tcon);
3746 tcon = NULL; 3755 tcon = NULL;
3756 if (rc == -EACCES)
3757 goto mount_fail_check;
3758
3747 goto remote_path_check; 3759 goto remote_path_check;
3748 } 3760 }
3749 3761
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index aa3debbba826..21d404535739 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -2597,7 +2597,7 @@ cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from,
2597 wdata->credits = credits; 2597 wdata->credits = credits;
2598 2598
2599 if (!wdata->cfile->invalidHandle || 2599 if (!wdata->cfile->invalidHandle ||
2600 !cifs_reopen_file(wdata->cfile, false)) 2600 !(rc = cifs_reopen_file(wdata->cfile, false)))
2601 rc = server->ops->async_writev(wdata, 2601 rc = server->ops->async_writev(wdata,
2602 cifs_uncached_writedata_release); 2602 cifs_uncached_writedata_release);
2603 if (rc) { 2603 if (rc) {
@@ -3022,7 +3022,7 @@ cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo *open_file,
3022 rdata->credits = credits; 3022 rdata->credits = credits;
3023 3023
3024 if (!rdata->cfile->invalidHandle || 3024 if (!rdata->cfile->invalidHandle ||
3025 !cifs_reopen_file(rdata->cfile, true)) 3025 !(rc = cifs_reopen_file(rdata->cfile, true)))
3026 rc = server->ops->async_readv(rdata); 3026 rc = server->ops->async_readv(rdata);
3027error: 3027error:
3028 if (rc) { 3028 if (rc) {
@@ -3617,7 +3617,7 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
3617 } 3617 }
3618 3618
3619 if (!rdata->cfile->invalidHandle || 3619 if (!rdata->cfile->invalidHandle ||
3620 !cifs_reopen_file(rdata->cfile, true)) 3620 !(rc = cifs_reopen_file(rdata->cfile, true)))
3621 rc = server->ops->async_readv(rdata); 3621 rc = server->ops->async_readv(rdata);
3622 if (rc) { 3622 if (rc) {
3623 add_credits_and_wake_if(server, rdata->credits, 0); 3623 add_credits_and_wake_if(server, rdata->credits, 0);
diff --git a/fs/cifs/ioctl.c b/fs/cifs/ioctl.c
index 001528781b6b..265c45fe4ea5 100644
--- a/fs/cifs/ioctl.c
+++ b/fs/cifs/ioctl.c
@@ -34,71 +34,14 @@
34#include "cifs_ioctl.h" 34#include "cifs_ioctl.h"
35#include <linux/btrfs.h> 35#include <linux/btrfs.h>
36 36
37static int cifs_file_clone_range(unsigned int xid, struct file *src_file, 37static long cifs_ioctl_copychunk(unsigned int xid, struct file *dst_file,
38 struct file *dst_file)
39{
40 struct inode *src_inode = file_inode(src_file);
41 struct inode *target_inode = file_inode(dst_file);
42 struct cifsFileInfo *smb_file_src;
43 struct cifsFileInfo *smb_file_target;
44 struct cifs_tcon *src_tcon;
45 struct cifs_tcon *target_tcon;
46 int rc;
47
48 cifs_dbg(FYI, "ioctl clone range\n");
49
50 if (!src_file->private_data || !dst_file->private_data) {
51 rc = -EBADF;
52 cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
53 goto out;
54 }
55
56 rc = -EXDEV;
57 smb_file_target = dst_file->private_data;
58 smb_file_src = src_file->private_data;
59 src_tcon = tlink_tcon(smb_file_src->tlink);
60 target_tcon = tlink_tcon(smb_file_target->tlink);
61
62 if (src_tcon->ses != target_tcon->ses) {
63 cifs_dbg(VFS, "source and target of copy not on same server\n");
64 goto out;
65 }
66
67 /*
68 * Note: cifs case is easier than btrfs since server responsible for
69 * checks for proper open modes and file type and if it wants
70 * server could even support copy of range where source = target
71 */
72 lock_two_nondirectories(target_inode, src_inode);
73
74 cifs_dbg(FYI, "about to flush pages\n");
75 /* should we flush first and last page first */
76 truncate_inode_pages(&target_inode->i_data, 0);
77
78 if (target_tcon->ses->server->ops->clone_range)
79 rc = target_tcon->ses->server->ops->clone_range(xid,
80 smb_file_src, smb_file_target, 0, src_inode->i_size, 0);
81 else
82 rc = -EOPNOTSUPP;
83
84 /* force revalidate of size and timestamps of target file now
85 that target is updated on the server */
86 CIFS_I(target_inode)->time = 0;
87 /* although unlocking in the reverse order from locking is not
88 strictly necessary here it is a little cleaner to be consistent */
89 unlock_two_nondirectories(src_inode, target_inode);
90out:
91 return rc;
92}
93
94static long cifs_ioctl_clone(unsigned int xid, struct file *dst_file,
95 unsigned long srcfd) 38 unsigned long srcfd)
96{ 39{
97 int rc; 40 int rc;
98 struct fd src_file; 41 struct fd src_file;
99 struct inode *src_inode; 42 struct inode *src_inode;
100 43
101 cifs_dbg(FYI, "ioctl clone range\n"); 44 cifs_dbg(FYI, "ioctl copychunk range\n");
102 /* the destination must be opened for writing */ 45 /* the destination must be opened for writing */
103 if (!(dst_file->f_mode & FMODE_WRITE)) { 46 if (!(dst_file->f_mode & FMODE_WRITE)) {
104 cifs_dbg(FYI, "file target not open for write\n"); 47 cifs_dbg(FYI, "file target not open for write\n");
@@ -129,7 +72,8 @@ static long cifs_ioctl_clone(unsigned int xid, struct file *dst_file,
129 if (S_ISDIR(src_inode->i_mode)) 72 if (S_ISDIR(src_inode->i_mode))
130 goto out_fput; 73 goto out_fput;
131 74
132 rc = cifs_file_clone_range(xid, src_file.file, dst_file); 75 rc = cifs_file_copychunk_range(xid, src_file.file, 0, dst_file, 0,
76 src_inode->i_size, 0);
133 77
134out_fput: 78out_fput:
135 fdput(src_file); 79 fdput(src_file);
@@ -251,7 +195,7 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
251 } 195 }
252 break; 196 break;
253 case CIFS_IOC_COPYCHUNK_FILE: 197 case CIFS_IOC_COPYCHUNK_FILE:
254 rc = cifs_ioctl_clone(xid, filep, arg); 198 rc = cifs_ioctl_copychunk(xid, filep, arg);
255 break; 199 break;
256 case CIFS_IOC_SET_INTEGRITY: 200 case CIFS_IOC_SET_INTEGRITY:
257 if (pSMBFile == NULL) 201 if (pSMBFile == NULL)
diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c
index fd516ea8b8f8..1a04b3a5beb1 100644
--- a/fs/cifs/smb2misc.c
+++ b/fs/cifs/smb2misc.c
@@ -659,3 +659,49 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server)
659 cifs_dbg(FYI, "Can not process oplock break for non-existent connection\n"); 659 cifs_dbg(FYI, "Can not process oplock break for non-existent connection\n");
660 return false; 660 return false;
661} 661}
662
663void
664smb2_cancelled_close_fid(struct work_struct *work)
665{
666 struct close_cancelled_open *cancelled = container_of(work,
667 struct close_cancelled_open, work);
668
669 cifs_dbg(VFS, "Close unmatched open\n");
670
671 SMB2_close(0, cancelled->tcon, cancelled->fid.persistent_fid,
672 cancelled->fid.volatile_fid);
673 cifs_put_tcon(cancelled->tcon);
674 kfree(cancelled);
675}
676
677int
678smb2_handle_cancelled_mid(char *buffer, struct TCP_Server_Info *server)
679{
680 struct smb2_sync_hdr *sync_hdr = get_sync_hdr(buffer);
681 struct smb2_create_rsp *rsp = (struct smb2_create_rsp *)buffer;
682 struct cifs_tcon *tcon;
683 struct close_cancelled_open *cancelled;
684
685 if (sync_hdr->Command != SMB2_CREATE ||
686 sync_hdr->Status != STATUS_SUCCESS)
687 return 0;
688
689 cancelled = kzalloc(sizeof(*cancelled), GFP_KERNEL);
690 if (!cancelled)
691 return -ENOMEM;
692
693 tcon = smb2_find_smb_tcon(server, sync_hdr->SessionId,
694 sync_hdr->TreeId);
695 if (!tcon) {
696 kfree(cancelled);
697 return -ENOENT;
698 }
699
700 cancelled->fid.persistent_fid = rsp->PersistentFileId;
701 cancelled->fid.volatile_fid = rsp->VolatileFileId;
702 cancelled->tcon = tcon;
703 INIT_WORK(&cancelled->work, smb2_cancelled_close_fid);
704 queue_work(cifsiod_wq, &cancelled->work);
705
706 return 0;
707}
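
The race being closed: a caller gives up (for example, is killed) while an
SMB2 CREATE is in flight, yet the server completes the open, so the handle
would leak server-side. Since the demultiplex thread must not block, the close
is deferred to a workqueue. The allocation side of that pattern, reduced to
its shape (the names come from the hunk; the wrapper is hypothetical):

/* Sketch: defer the orphaned-handle close to the cifsiod workqueue. */
static int queue_cancelled_close(struct cifs_tcon *tcon, u64 pfid, u64 vfid)
{
	struct close_cancelled_open *c = kzalloc(sizeof(*c), GFP_KERNEL);

	if (!c)
		return -ENOMEM;
	c->fid.persistent_fid = pfid;
	c->fid.volatile_fid = vfid;
	c->tcon = tcon;		/* ref taken by smb2_find_smb_tcon() */
	INIT_WORK(&c->work, smb2_cancelled_close_fid);
	queue_work(cifsiod_wq, &c->work);
	return 0;
}
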
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index 0231108d9387..152e37f2ad92 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -21,6 +21,7 @@
21#include <linux/vfs.h> 21#include <linux/vfs.h>
22#include <linux/falloc.h> 22#include <linux/falloc.h>
23#include <linux/scatterlist.h> 23#include <linux/scatterlist.h>
24#include <linux/uuid.h>
24#include <crypto/aead.h> 25#include <crypto/aead.h>
25#include "cifsglob.h" 26#include "cifsglob.h"
26#include "smb2pdu.h" 27#include "smb2pdu.h"
@@ -592,8 +593,8 @@ req_res_key_exit:
592 return rc; 593 return rc;
593} 594}
594 595
595static int 596static ssize_t
596smb2_clone_range(const unsigned int xid, 597smb2_copychunk_range(const unsigned int xid,
597 struct cifsFileInfo *srcfile, 598 struct cifsFileInfo *srcfile,
598 struct cifsFileInfo *trgtfile, u64 src_off, 599 struct cifsFileInfo *trgtfile, u64 src_off,
599 u64 len, u64 dest_off) 600 u64 len, u64 dest_off)
@@ -605,13 +606,14 @@ smb2_clone_range(const unsigned int xid,
605 struct cifs_tcon *tcon; 606 struct cifs_tcon *tcon;
606 int chunks_copied = 0; 607 int chunks_copied = 0;
607 bool chunk_sizes_updated = false; 608 bool chunk_sizes_updated = false;
609 ssize_t bytes_written, total_bytes_written = 0;
608 610
609 pcchunk = kmalloc(sizeof(struct copychunk_ioctl), GFP_KERNEL); 611 pcchunk = kmalloc(sizeof(struct copychunk_ioctl), GFP_KERNEL);
610 612
611 if (pcchunk == NULL) 613 if (pcchunk == NULL)
612 return -ENOMEM; 614 return -ENOMEM;
613 615
614 cifs_dbg(FYI, "in smb2_clone_range - about to call request res key\n"); 616 cifs_dbg(FYI, "in smb2_copychunk_range - about to call request res key\n");
615 /* Request a key from the server to identify the source of the copy */ 617 /* Request a key from the server to identify the source of the copy */
616 rc = SMB2_request_res_key(xid, tlink_tcon(srcfile->tlink), 618 rc = SMB2_request_res_key(xid, tlink_tcon(srcfile->tlink),
617 srcfile->fid.persistent_fid, 619 srcfile->fid.persistent_fid,
@@ -669,14 +671,16 @@ smb2_clone_range(const unsigned int xid,
669 } 671 }
670 chunks_copied++; 672 chunks_copied++;
671 673
672 src_off += le32_to_cpu(retbuf->TotalBytesWritten); 674 bytes_written = le32_to_cpu(retbuf->TotalBytesWritten);
673 dest_off += le32_to_cpu(retbuf->TotalBytesWritten); 675 src_off += bytes_written;
674 len -= le32_to_cpu(retbuf->TotalBytesWritten); 676 dest_off += bytes_written;
677 len -= bytes_written;
678 total_bytes_written += bytes_written;
675 679
676 cifs_dbg(FYI, "Chunks %d PartialChunk %d Total %d\n", 680 cifs_dbg(FYI, "Chunks %d PartialChunk %d Total %zu\n",
677 le32_to_cpu(retbuf->ChunksWritten), 681 le32_to_cpu(retbuf->ChunksWritten),
678 le32_to_cpu(retbuf->ChunkBytesWritten), 682 le32_to_cpu(retbuf->ChunkBytesWritten),
679 le32_to_cpu(retbuf->TotalBytesWritten)); 683 bytes_written);
680 } else if (rc == -EINVAL) { 684 } else if (rc == -EINVAL) {
681 if (ret_data_len != sizeof(struct copychunk_ioctl_rsp)) 685 if (ret_data_len != sizeof(struct copychunk_ioctl_rsp))
682 goto cchunk_out; 686 goto cchunk_out;
@@ -713,7 +717,10 @@ smb2_clone_range(const unsigned int xid,
713cchunk_out: 717cchunk_out:
714 kfree(pcchunk); 718 kfree(pcchunk);
715 kfree(retbuf); 719 kfree(retbuf);
716 return rc; 720 if (rc)
721 return rc;
722 else
723 return total_bytes_written;
717} 724}
718 725
719static int 726static int
@@ -2322,6 +2329,7 @@ struct smb_version_operations smb20_operations = {
2322 .clear_stats = smb2_clear_stats, 2329 .clear_stats = smb2_clear_stats,
2323 .print_stats = smb2_print_stats, 2330 .print_stats = smb2_print_stats,
2324 .is_oplock_break = smb2_is_valid_oplock_break, 2331 .is_oplock_break = smb2_is_valid_oplock_break,
2332 .handle_cancelled_mid = smb2_handle_cancelled_mid,
2325 .downgrade_oplock = smb2_downgrade_oplock, 2333 .downgrade_oplock = smb2_downgrade_oplock,
2326 .need_neg = smb2_need_neg, 2334 .need_neg = smb2_need_neg,
2327 .negotiate = smb2_negotiate, 2335 .negotiate = smb2_negotiate,
@@ -2377,7 +2385,7 @@ struct smb_version_operations smb20_operations = {
2377 .set_oplock_level = smb2_set_oplock_level, 2385 .set_oplock_level = smb2_set_oplock_level,
2378 .create_lease_buf = smb2_create_lease_buf, 2386 .create_lease_buf = smb2_create_lease_buf,
2379 .parse_lease_buf = smb2_parse_lease_buf, 2387 .parse_lease_buf = smb2_parse_lease_buf,
2380 .clone_range = smb2_clone_range, 2388 .copychunk_range = smb2_copychunk_range,
2381 .wp_retry_size = smb2_wp_retry_size, 2389 .wp_retry_size = smb2_wp_retry_size,
2382 .dir_needs_close = smb2_dir_needs_close, 2390 .dir_needs_close = smb2_dir_needs_close,
2383 .get_dfs_refer = smb2_get_dfs_refer, 2391 .get_dfs_refer = smb2_get_dfs_refer,
@@ -2404,6 +2412,7 @@ struct smb_version_operations smb21_operations = {
2404 .clear_stats = smb2_clear_stats, 2412 .clear_stats = smb2_clear_stats,
2405 .print_stats = smb2_print_stats, 2413 .print_stats = smb2_print_stats,
2406 .is_oplock_break = smb2_is_valid_oplock_break, 2414 .is_oplock_break = smb2_is_valid_oplock_break,
2415 .handle_cancelled_mid = smb2_handle_cancelled_mid,
2407 .downgrade_oplock = smb2_downgrade_oplock, 2416 .downgrade_oplock = smb2_downgrade_oplock,
2408 .need_neg = smb2_need_neg, 2417 .need_neg = smb2_need_neg,
2409 .negotiate = smb2_negotiate, 2418 .negotiate = smb2_negotiate,
@@ -2459,7 +2468,7 @@ struct smb_version_operations smb21_operations = {
2459 .set_oplock_level = smb21_set_oplock_level, 2468 .set_oplock_level = smb21_set_oplock_level,
2460 .create_lease_buf = smb2_create_lease_buf, 2469 .create_lease_buf = smb2_create_lease_buf,
2461 .parse_lease_buf = smb2_parse_lease_buf, 2470 .parse_lease_buf = smb2_parse_lease_buf,
2462 .clone_range = smb2_clone_range, 2471 .copychunk_range = smb2_copychunk_range,
2463 .wp_retry_size = smb2_wp_retry_size, 2472 .wp_retry_size = smb2_wp_retry_size,
2464 .dir_needs_close = smb2_dir_needs_close, 2473 .dir_needs_close = smb2_dir_needs_close,
2465 .enum_snapshots = smb3_enum_snapshots, 2474 .enum_snapshots = smb3_enum_snapshots,
@@ -2488,6 +2497,7 @@ struct smb_version_operations smb30_operations = {
2488 .print_stats = smb2_print_stats, 2497 .print_stats = smb2_print_stats,
2489 .dump_share_caps = smb2_dump_share_caps, 2498 .dump_share_caps = smb2_dump_share_caps,
2490 .is_oplock_break = smb2_is_valid_oplock_break, 2499 .is_oplock_break = smb2_is_valid_oplock_break,
2500 .handle_cancelled_mid = smb2_handle_cancelled_mid,
2491 .downgrade_oplock = smb2_downgrade_oplock, 2501 .downgrade_oplock = smb2_downgrade_oplock,
2492 .need_neg = smb2_need_neg, 2502 .need_neg = smb2_need_neg,
2493 .negotiate = smb2_negotiate, 2503 .negotiate = smb2_negotiate,
@@ -2545,7 +2555,7 @@ struct smb_version_operations smb30_operations = {
2545 .set_oplock_level = smb3_set_oplock_level, 2555 .set_oplock_level = smb3_set_oplock_level,
2546 .create_lease_buf = smb3_create_lease_buf, 2556 .create_lease_buf = smb3_create_lease_buf,
2547 .parse_lease_buf = smb3_parse_lease_buf, 2557 .parse_lease_buf = smb3_parse_lease_buf,
2548 .clone_range = smb2_clone_range, 2558 .copychunk_range = smb2_copychunk_range,
2549 .duplicate_extents = smb2_duplicate_extents, 2559 .duplicate_extents = smb2_duplicate_extents,
2550 .validate_negotiate = smb3_validate_negotiate, 2560 .validate_negotiate = smb3_validate_negotiate,
2551 .wp_retry_size = smb2_wp_retry_size, 2561 .wp_retry_size = smb2_wp_retry_size,
@@ -2582,6 +2592,7 @@ struct smb_version_operations smb311_operations = {
2582 .print_stats = smb2_print_stats, 2592 .print_stats = smb2_print_stats,
2583 .dump_share_caps = smb2_dump_share_caps, 2593 .dump_share_caps = smb2_dump_share_caps,
2584 .is_oplock_break = smb2_is_valid_oplock_break, 2594 .is_oplock_break = smb2_is_valid_oplock_break,
2595 .handle_cancelled_mid = smb2_handle_cancelled_mid,
2585 .downgrade_oplock = smb2_downgrade_oplock, 2596 .downgrade_oplock = smb2_downgrade_oplock,
2586 .need_neg = smb2_need_neg, 2597 .need_neg = smb2_need_neg,
2587 .negotiate = smb2_negotiate, 2598 .negotiate = smb2_negotiate,
@@ -2639,7 +2650,7 @@ struct smb_version_operations smb311_operations = {
2639 .set_oplock_level = smb3_set_oplock_level, 2650 .set_oplock_level = smb3_set_oplock_level,
2640 .create_lease_buf = smb3_create_lease_buf, 2651 .create_lease_buf = smb3_create_lease_buf,
2641 .parse_lease_buf = smb3_parse_lease_buf, 2652 .parse_lease_buf = smb3_parse_lease_buf,
2642 .clone_range = smb2_clone_range, 2653 .copychunk_range = smb2_copychunk_range,
2643 .duplicate_extents = smb2_duplicate_extents, 2654 .duplicate_extents = smb2_duplicate_extents,
2644/* .validate_negotiate = smb3_validate_negotiate, */ /* not used in 3.11 */ 2655/* .validate_negotiate = smb3_validate_negotiate, */ /* not used in 3.11 */
2645 .wp_retry_size = smb2_wp_retry_size, 2656 .wp_retry_size = smb2_wp_retry_size,
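
Switching the hook from int to ssize_t is what lets it back copy_file_range():
the VFS expects the number of bytes copied, not merely success. The accounting
in outline (copy_one_chunk() is a hypothetical stand-in for the COPYCHUNK
IOCTL round-trip, and forward progress each round is assumed):

/* Sketch: sum per-chunk progress so the caller gets a byte count. */
static ssize_t copy_in_chunks(u64 src_off, u64 dest_off, u64 len)
{
	ssize_t total = 0;

	while (len) {
		ssize_t written = copy_one_chunk(src_off, dest_off, len);

		if (written < 0)
			return written;	/* error outranks partial progress */
		src_off += written;
		dest_off += written;
		len -= written;
		total += written;
	}
	return total;
}
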
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index 7446496850a3..02da648041fc 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -562,8 +562,10 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
562 * but for time being this is our only auth choice so doesn't matter. 562 * but for time being this is our only auth choice so doesn't matter.
563 * We just found a server which sets blob length to zero expecting raw. 563 * We just found a server which sets blob length to zero expecting raw.
564 */ 564 */
565 if (blob_length == 0) 565 if (blob_length == 0) {
566 cifs_dbg(FYI, "missing security blob on negprot\n"); 566 cifs_dbg(FYI, "missing security blob on negprot\n");
567 server->sec_ntlmssp = true;
568 }
567 569
568 rc = cifs_enable_signing(server, ses->sign); 570 rc = cifs_enable_signing(server, ses->sign);
569 if (rc) 571 if (rc)
@@ -1171,9 +1173,6 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
1171 else 1173 else
1172 return -EIO; 1174 return -EIO;
1173 1175
1174 if (tcon && tcon->bad_network_name)
1175 return -ENOENT;
1176
1177 unc_path = kmalloc(MAX_SHARENAME_LENGTH * 2, GFP_KERNEL); 1176 unc_path = kmalloc(MAX_SHARENAME_LENGTH * 2, GFP_KERNEL);
1178 if (unc_path == NULL) 1177 if (unc_path == NULL)
1179 return -ENOMEM; 1178 return -ENOMEM;
@@ -1185,6 +1184,10 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
1185 return -EINVAL; 1184 return -EINVAL;
1186 } 1185 }
1187 1186
1187 /* SMB2 TREE_CONNECT request must be called with TreeId == 0 */
1188 if (tcon)
1189 tcon->tid = 0;
1190
1188 rc = small_smb2_init(SMB2_TREE_CONNECT, tcon, (void **) &req); 1191 rc = small_smb2_init(SMB2_TREE_CONNECT, tcon, (void **) &req);
1189 if (rc) { 1192 if (rc) {
1190 kfree(unc_path); 1193 kfree(unc_path);
@@ -1273,8 +1276,6 @@ tcon_exit:
1273tcon_error_exit: 1276tcon_error_exit:
1274 if (rsp->hdr.sync_hdr.Status == STATUS_BAD_NETWORK_NAME) { 1277 if (rsp->hdr.sync_hdr.Status == STATUS_BAD_NETWORK_NAME) {
1275 cifs_dbg(VFS, "BAD_NETWORK_NAME: %s\n", tree); 1278 cifs_dbg(VFS, "BAD_NETWORK_NAME: %s\n", tree);
1276 if (tcon)
1277 tcon->bad_network_name = true;
1278 } 1279 }
1279 goto tcon_exit; 1280 goto tcon_exit;
1280} 1281}
@@ -2177,6 +2178,9 @@ void smb2_reconnect_server(struct work_struct *work)
2177 struct cifs_tcon *tcon, *tcon2; 2178 struct cifs_tcon *tcon, *tcon2;
2178 struct list_head tmp_list; 2179 struct list_head tmp_list;
2179 int tcon_exist = false; 2180 int tcon_exist = false;
2181 int rc;
2182	bool resched = false;
2183
2180 2184
2181 /* Prevent simultaneous reconnects that can corrupt tcon->rlist list */ 2185 /* Prevent simultaneous reconnects that can corrupt tcon->rlist list */
2182 mutex_lock(&server->reconnect_mutex); 2186 mutex_lock(&server->reconnect_mutex);
@@ -2204,13 +2208,18 @@ void smb2_reconnect_server(struct work_struct *work)
2204 spin_unlock(&cifs_tcp_ses_lock); 2208 spin_unlock(&cifs_tcp_ses_lock);
2205 2209
2206 list_for_each_entry_safe(tcon, tcon2, &tmp_list, rlist) { 2210 list_for_each_entry_safe(tcon, tcon2, &tmp_list, rlist) {
2207 if (!smb2_reconnect(SMB2_INTERNAL_CMD, tcon)) 2211 rc = smb2_reconnect(SMB2_INTERNAL_CMD, tcon);
2212 if (!rc)
2208 cifs_reopen_persistent_handles(tcon); 2213 cifs_reopen_persistent_handles(tcon);
2214 else
2215 resched = true;
2209 list_del_init(&tcon->rlist); 2216 list_del_init(&tcon->rlist);
2210 cifs_put_tcon(tcon); 2217 cifs_put_tcon(tcon);
2211 } 2218 }
2212 2219
2213 cifs_dbg(FYI, "Reconnecting tcons finished\n"); 2220 cifs_dbg(FYI, "Reconnecting tcons finished\n");
2221 if (resched)
2222 queue_delayed_work(cifsiod_wq, &server->reconnect, 2 * HZ);
2214 mutex_unlock(&server->reconnect_mutex); 2223 mutex_unlock(&server->reconnect_mutex);
2215 2224
2216 /* now we can safely release srv struct */ 2225 /* now we can safely release srv struct */
diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h
index 69e35873b1de..6853454fc871 100644
--- a/fs/cifs/smb2proto.h
+++ b/fs/cifs/smb2proto.h
@@ -48,6 +48,10 @@ extern struct mid_q_entry *smb2_setup_request(struct cifs_ses *ses,
48 struct smb_rqst *rqst); 48 struct smb_rqst *rqst);
49extern struct mid_q_entry *smb2_setup_async_request( 49extern struct mid_q_entry *smb2_setup_async_request(
50 struct TCP_Server_Info *server, struct smb_rqst *rqst); 50 struct TCP_Server_Info *server, struct smb_rqst *rqst);
51extern struct cifs_ses *smb2_find_smb_ses(struct TCP_Server_Info *server,
52 __u64 ses_id);
53extern struct cifs_tcon *smb2_find_smb_tcon(struct TCP_Server_Info *server,
54 __u64 ses_id, __u32 tid);
51extern int smb2_calc_signature(struct smb_rqst *rqst, 55extern int smb2_calc_signature(struct smb_rqst *rqst,
52 struct TCP_Server_Info *server); 56 struct TCP_Server_Info *server);
53extern int smb3_calc_signature(struct smb_rqst *rqst, 57extern int smb3_calc_signature(struct smb_rqst *rqst,
@@ -164,6 +168,9 @@ extern int SMB2_set_compression(const unsigned int xid, struct cifs_tcon *tcon,
164extern int SMB2_oplock_break(const unsigned int xid, struct cifs_tcon *tcon, 168extern int SMB2_oplock_break(const unsigned int xid, struct cifs_tcon *tcon,
165 const u64 persistent_fid, const u64 volatile_fid, 169 const u64 persistent_fid, const u64 volatile_fid,
166 const __u8 oplock_level); 170 const __u8 oplock_level);
171extern int smb2_handle_cancelled_mid(char *buffer,
172 struct TCP_Server_Info *server);
173void smb2_cancelled_close_fid(struct work_struct *work);
167extern int SMB2_QFS_info(const unsigned int xid, struct cifs_tcon *tcon, 174extern int SMB2_QFS_info(const unsigned int xid, struct cifs_tcon *tcon,
168 u64 persistent_file_id, u64 volatile_file_id, 175 u64 persistent_file_id, u64 volatile_file_id,
169 struct kstatfs *FSData); 176 struct kstatfs *FSData);
diff --git a/fs/cifs/smb2transport.c b/fs/cifs/smb2transport.c
index 7c3bb1bd7eed..506b67fc93d9 100644
--- a/fs/cifs/smb2transport.c
+++ b/fs/cifs/smb2transport.c
@@ -115,23 +115,70 @@ smb3_crypto_shash_allocate(struct TCP_Server_Info *server)
 	return 0;
 }
 
-struct cifs_ses *
-smb2_find_smb_ses(struct TCP_Server_Info *server, __u64 ses_id)
+static struct cifs_ses *
+smb2_find_smb_ses_unlocked(struct TCP_Server_Info *server, __u64 ses_id)
 {
 	struct cifs_ses *ses;
 
-	spin_lock(&cifs_tcp_ses_lock);
 	list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
 		if (ses->Suid != ses_id)
 			continue;
-		spin_unlock(&cifs_tcp_ses_lock);
 		return ses;
 	}
+
+	return NULL;
+}
+
+struct cifs_ses *
+smb2_find_smb_ses(struct TCP_Server_Info *server, __u64 ses_id)
+{
+	struct cifs_ses *ses;
+
+	spin_lock(&cifs_tcp_ses_lock);
+	ses = smb2_find_smb_ses_unlocked(server, ses_id);
 	spin_unlock(&cifs_tcp_ses_lock);
 
+	return ses;
+}
+
+static struct cifs_tcon *
+smb2_find_smb_sess_tcon_unlocked(struct cifs_ses *ses, __u32 tid)
+{
+	struct cifs_tcon *tcon;
+
+	list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
+		if (tcon->tid != tid)
+			continue;
+		++tcon->tc_count;
+		return tcon;
+	}
+
 	return NULL;
 }
 
+/*
+ * Obtain tcon corresponding to the tid in the given
+ * cifs_ses
+ */
+
+struct cifs_tcon *
+smb2_find_smb_tcon(struct TCP_Server_Info *server, __u64 ses_id, __u32 tid)
+{
+	struct cifs_ses *ses;
+	struct cifs_tcon *tcon;
+
+	spin_lock(&cifs_tcp_ses_lock);
+	ses = smb2_find_smb_ses_unlocked(server, ses_id);
+	if (!ses) {
+		spin_unlock(&cifs_tcp_ses_lock);
+		return NULL;
+	}
+	tcon = smb2_find_smb_sess_tcon_unlocked(ses, tid);
+	spin_unlock(&cifs_tcp_ses_lock);
+
+	return tcon;
+}
+
 int
 smb2_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server)
 {
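
The smb2transport.c refactor is the classic locked/unlocked helper split: the raw list walk moves into smb2_find_smb_ses_unlocked() so the new smb2_find_smb_tcon() can perform two dependent lookups under a single acquisition of cifs_tcp_ses_lock. A generic sketch of the pattern with a pthread mutex (names here are illustrative, not kernel API):

	#include <pthread.h>
	#include <stddef.h>

	struct node { int id; struct node *next; };

	static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
	static struct node *table;

	/* Caller must hold table_lock. */
	static struct node *find_unlocked(int id)
	{
		for (struct node *n = table; n; n = n->next)
			if (n->id == id)
				return n;
		return NULL;
	}

	/* Public wrapper: one lookup under the lock. */
	static struct node *find(int id)
	{
		pthread_mutex_lock(&table_lock);
		struct node *n = find_unlocked(id);
		pthread_mutex_unlock(&table_lock);
		return n;
	}

	/* Two dependent lookups in one critical section, as in
	 * smb2_find_smb_tcon() above. */
	static struct node *find_pair(int first, int second, struct node **out)
	{
		pthread_mutex_lock(&table_lock);
		struct node *a = find_unlocked(first);
		*out = a ? find_unlocked(second) : NULL;
		pthread_mutex_unlock(&table_lock);
		return a;
	}

	int main(void)
	{
		struct node n2 = { 2, NULL }, n1 = { 1, &n2 };
		struct node *second;

		table = &n1;
		return (find(1) && find_pair(1, 2, &second) && second) ? 0 : 1;
	}
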
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index 526f0533cb4e..f6e13a977fc8 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -752,9 +752,11 @@ cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
 
 	rc = wait_for_response(ses->server, midQ);
 	if (rc != 0) {
+		cifs_dbg(FYI, "Cancelling wait for mid %llu\n", midQ->mid);
 		send_cancel(ses->server, rqst, midQ);
 		spin_lock(&GlobalMid_Lock);
 		if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
+			midQ->mid_flags |= MID_WAIT_CANCELLED;
 			midQ->callback = DeleteMidQEntry;
 			spin_unlock(&GlobalMid_Lock);
 			add_credits(ses->server, 1, optype);
diff --git a/fs/dax.c b/fs/dax.c
index de622d4282a6..85abd741253d 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -373,6 +373,22 @@ restart:
 	}
 	spin_lock_irq(&mapping->tree_lock);
 
+	if (!entry) {
+		/*
+		 * We needed to drop the page_tree lock while calling
+		 * radix_tree_preload() and we didn't have an entry to
+		 * lock.  See if another thread inserted an entry at
+		 * our index during this time.
+		 */
+		entry = __radix_tree_lookup(&mapping->page_tree, index,
+				NULL, &slot);
+		if (entry) {
+			radix_tree_preload_end();
+			spin_unlock_irq(&mapping->tree_lock);
+			goto restart;
+		}
+	}
+
 	if (pmd_downgrade) {
 		radix_tree_delete(&mapping->page_tree, index);
 		mapping->nrexceptional--;
@@ -388,19 +404,12 @@ restart:
 		if (err) {
 			spin_unlock_irq(&mapping->tree_lock);
 			/*
-			 * Someone already created the entry?  This is a
-			 * normal failure when inserting PMDs in a range
-			 * that already contains PTEs.  In that case we want
-			 * to return -EEXIST immediately.
-			 */
-			if (err == -EEXIST && !(size_flag & RADIX_DAX_PMD))
-				goto restart;
-			/*
-			 * Our insertion of a DAX PMD entry failed, most
-			 * likely because it collided with a PTE sized entry
-			 * at a different index in the PMD range.  We haven't
-			 * inserted anything into the radix tree and have no
-			 * waiters to wake.
+			 * Our insertion of a DAX entry failed, most likely
+			 * because we were inserting a PMD entry and it
+			 * collided with a PTE sized entry at a different
+			 * index in the PMD range.  We haven't inserted
+			 * anything into the radix tree and have no waiters to
+			 * wake.
 			 */
 			return ERR_PTR(err);
 		}
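
The first dax.c hunk adds the standard re-check after a lock has been dropped and retaken: radix_tree_preload() can sleep, so another thread may have inserted an entry at the same index in that window. A reduced model of the drop-lock/recheck/retry shape (pthread-based, not the DAX code):

	#include <pthread.h>
	#include <stdlib.h>

	#define NSLOTS 128

	static pthread_mutex_t tree_lock = PTHREAD_MUTEX_INITIALIZER;
	static void *slots[NSLOTS];		/* toy stand-in for the radix tree */

	static void *lookup(long i) { return slots[i % NSLOTS]; }
	static void insert(long i, void *e) { slots[i % NSLOTS] = e; }

	static void *get_or_create(long index)
	{
		void *entry;

	restart:
		pthread_mutex_lock(&tree_lock);
		entry = lookup(index);
		if (entry) {
			pthread_mutex_unlock(&tree_lock);
			return entry;
		}
		pthread_mutex_unlock(&tree_lock);

		void *new_entry = malloc(64);	/* the point where we may sleep */

		pthread_mutex_lock(&tree_lock);
		if (lookup(index)) {		/* someone raced with us: retry */
			pthread_mutex_unlock(&tree_lock);
			free(new_entry);
			goto restart;
		}
		insert(index, new_entry);
		pthread_mutex_unlock(&tree_lock);
		return new_entry;
	}

	int main(void)
	{
		return get_or_create(7) ? 0 : 1;
	}
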
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index f493af666591..fb69ee2388db 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -2466,6 +2466,7 @@ extern int ext4_setattr(struct dentry *, struct iattr *);
 extern int  ext4_getattr(const struct path *, struct kstat *, u32, unsigned int);
 extern void ext4_evict_inode(struct inode *);
 extern void ext4_clear_inode(struct inode *);
+extern int  ext4_file_getattr(const struct path *, struct kstat *, u32, unsigned int);
 extern int  ext4_sync_inode(handle_t *, struct inode *);
 extern void ext4_dirty_inode(struct inode *, int);
 extern int ext4_change_inode_journal_flag(struct inode *, int);
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 8210c1f43556..cefa9835f275 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -744,7 +744,7 @@ const struct file_operations ext4_file_operations = {
 
 const struct inode_operations ext4_file_inode_operations = {
 	.setattr	= ext4_setattr,
-	.getattr	= ext4_getattr,
+	.getattr	= ext4_file_getattr,
 	.listxattr	= ext4_listxattr,
 	.get_acl	= ext4_get_acl,
 	.set_acl	= ext4_set_acl,
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 4247d8d25687..b9ffa9f4191f 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -5390,11 +5390,46 @@ err_out:
 int ext4_getattr(const struct path *path, struct kstat *stat,
 		 u32 request_mask, unsigned int query_flags)
 {
-	struct inode *inode;
-	unsigned long long delalloc_blocks;
+	struct inode *inode = d_inode(path->dentry);
+	struct ext4_inode *raw_inode;
+	struct ext4_inode_info *ei = EXT4_I(inode);
+	unsigned int flags;
+
+	if (EXT4_FITS_IN_INODE(raw_inode, ei, i_crtime)) {
+		stat->result_mask |= STATX_BTIME;
+		stat->btime.tv_sec = ei->i_crtime.tv_sec;
+		stat->btime.tv_nsec = ei->i_crtime.tv_nsec;
+	}
+
+	flags = ei->i_flags & EXT4_FL_USER_VISIBLE;
+	if (flags & EXT4_APPEND_FL)
+		stat->attributes |= STATX_ATTR_APPEND;
+	if (flags & EXT4_COMPR_FL)
+		stat->attributes |= STATX_ATTR_COMPRESSED;
+	if (flags & EXT4_ENCRYPT_FL)
+		stat->attributes |= STATX_ATTR_ENCRYPTED;
+	if (flags & EXT4_IMMUTABLE_FL)
+		stat->attributes |= STATX_ATTR_IMMUTABLE;
+	if (flags & EXT4_NODUMP_FL)
+		stat->attributes |= STATX_ATTR_NODUMP;
+
+	stat->attributes_mask |= (STATX_ATTR_APPEND |
+				  STATX_ATTR_COMPRESSED |
+				  STATX_ATTR_ENCRYPTED |
+				  STATX_ATTR_IMMUTABLE |
+				  STATX_ATTR_NODUMP);
 
-	inode = d_inode(path->dentry);
 	generic_fillattr(inode, stat);
+	return 0;
+}
+
+int ext4_file_getattr(const struct path *path, struct kstat *stat,
+		      u32 request_mask, unsigned int query_flags)
+{
+	struct inode *inode = d_inode(path->dentry);
+	u64 delalloc_blocks;
+
+	ext4_getattr(path, stat, request_mask, query_flags);
 
 	/*
 	 * If there is inline data in the inode, the inode will normally not
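
After the split above, ext4_getattr() fills the statx birth time and attribute bits cheaply for every inode type, while the delalloc-aware size accounting stays in ext4_file_getattr() for regular files. The new fields are visible from user space through statx(2); a minimal caller (assumes the statx() wrapper from glibc 2.28 or later):

	#define _GNU_SOURCE
	#include <stdio.h>
	#include <fcntl.h>	/* AT_FDCWD */
	#include <sys/stat.h>	/* statx(), struct statx, STATX_* */

	int main(int argc, char **argv)
	{
		struct statx stx;

		if (argc < 2)
			return 1;
		if (statx(AT_FDCWD, argv[1], 0,
			  STATX_BASIC_STATS | STATX_BTIME, &stx)) {
			perror("statx");
			return 1;
		}
		if (stx.stx_mask & STATX_BTIME)
			printf("btime: %lld\n", (long long)stx.stx_btime.tv_sec);
		if (stx.stx_attributes_mask & STATX_ATTR_IMMUTABLE)
			printf("immutable: %s\n",
			       (stx.stx_attributes & STATX_ATTR_IMMUTABLE) ?
			       "yes" : "no");
		return 0;
	}
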
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index 6ad612c576fc..07e5e1405771 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -3912,6 +3912,7 @@ const struct inode_operations ext4_dir_inode_operations = {
 	.tmpfile	= ext4_tmpfile,
 	.rename		= ext4_rename2,
 	.setattr	= ext4_setattr,
+	.getattr	= ext4_getattr,
 	.listxattr	= ext4_listxattr,
 	.get_acl	= ext4_get_acl,
 	.set_acl	= ext4_set_acl,
@@ -3920,6 +3921,7 @@ const struct inode_operations ext4_dir_inode_operations = {
 
 const struct inode_operations ext4_special_inode_operations = {
 	.setattr	= ext4_setattr,
+	.getattr	= ext4_getattr,
 	.listxattr	= ext4_listxattr,
 	.get_acl	= ext4_get_acl,
 	.set_acl	= ext4_set_acl,
diff --git a/fs/ext4/symlink.c b/fs/ext4/symlink.c
index 73b184d161fc..5c8fc53cb0e5 100644
--- a/fs/ext4/symlink.c
+++ b/fs/ext4/symlink.c
@@ -85,17 +85,20 @@ errout:
 const struct inode_operations ext4_encrypted_symlink_inode_operations = {
 	.get_link	= ext4_encrypted_get_link,
 	.setattr	= ext4_setattr,
+	.getattr	= ext4_getattr,
 	.listxattr	= ext4_listxattr,
 };
 
 const struct inode_operations ext4_symlink_inode_operations = {
 	.get_link	= page_get_link,
 	.setattr	= ext4_setattr,
+	.getattr	= ext4_getattr,
 	.listxattr	= ext4_listxattr,
 };
 
 const struct inode_operations ext4_fast_symlink_inode_operations = {
 	.get_link	= simple_get_link,
 	.setattr	= ext4_setattr,
+	.getattr	= ext4_getattr,
 	.listxattr	= ext4_listxattr,
 };
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 8f96461236f6..dde861387a40 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -136,17 +136,26 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
 	vma->vm_flags |= VM_HUGETLB | VM_DONTEXPAND;
 	vma->vm_ops = &hugetlb_vm_ops;
 
+	/*
+	 * Offset passed to mmap (before page shift) could have been
+	 * negative when represented as a (l)off_t.
+	 */
+	if (((loff_t)vma->vm_pgoff << PAGE_SHIFT) < 0)
+		return -EINVAL;
+
 	if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT))
 		return -EINVAL;
 
 	vma_len = (loff_t)(vma->vm_end - vma->vm_start);
+	len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
+	/* check for overflow */
+	if (len < vma_len)
+		return -EINVAL;
 
 	inode_lock(inode);
 	file_accessed(file);
 
 	ret = -ENOMEM;
-	len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
-
 	if (hugetlb_reserve_pages(inode,
 				vma->vm_pgoff >> huge_page_order(h),
 				len >> huge_page_shift(h), vma,
@@ -155,7 +164,7 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
 
 	ret = 0;
 	if (vma->vm_flags & VM_WRITE && inode->i_size < len)
-		inode->i_size = len;
+		i_size_write(inode, len);
 out:
 	inode_unlock(inode);
 
@@ -695,14 +704,11 @@ static struct inode *hugetlbfs_get_root(struct super_block *sb,
 
 	inode = new_inode(sb);
 	if (inode) {
-		struct hugetlbfs_inode_info *info;
 		inode->i_ino = get_next_ino();
 		inode->i_mode = S_IFDIR | config->mode;
 		inode->i_uid = config->uid;
 		inode->i_gid = config->gid;
 		inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
-		info = HUGETLBFS_I(inode);
-		mpol_shared_policy_init(&info->policy, NULL);
 		inode->i_op = &hugetlbfs_dir_inode_operations;
 		inode->i_fop = &simple_dir_operations;
 		/* directory inodes start off with i_nlink == 2 (for "." entry) */
@@ -733,7 +739,6 @@ static struct inode *hugetlbfs_get_inode(struct super_block *sb,
 
 	inode = new_inode(sb);
 	if (inode) {
-		struct hugetlbfs_inode_info *info;
 		inode->i_ino = get_next_ino();
 		inode_init_owner(inode, dir, mode);
 		lockdep_set_class(&inode->i_mapping->i_mmap_rwsem,
@@ -741,15 +746,6 @@ static struct inode *hugetlbfs_get_inode(struct super_block *sb,
 		inode->i_mapping->a_ops = &hugetlbfs_aops;
 		inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
 		inode->i_mapping->private_data = resv_map;
-		info = HUGETLBFS_I(inode);
-		/*
-		 * The policy is initialized here even if we are creating a
-		 * private inode because initialization simply creates an
-		 * an empty rb tree and calls rwlock_init(), later when we
-		 * call mpol_free_shared_policy() it will just return because
-		 * the rb tree will still be empty.
-		 */
-		mpol_shared_policy_init(&info->policy, NULL);
 		switch (mode & S_IFMT) {
 		default:
 			init_special_inode(inode, mode, dev);
@@ -937,6 +933,18 @@ static struct inode *hugetlbfs_alloc_inode(struct super_block *sb)
 		hugetlbfs_inc_free_inodes(sbinfo);
 		return NULL;
 	}
+
+	/*
+	 * Any time after allocation, hugetlbfs_destroy_inode can be called
+	 * for the inode.  mpol_free_shared_policy is unconditionally called
+	 * as part of hugetlbfs_destroy_inode.  So, initialize policy here
+	 * in case of a quick call to destroy.
+	 *
+	 * Note that the policy is initialized even if we are creating a
+	 * private inode.  This simplifies hugetlbfs_destroy_inode.
+	 */
+	mpol_shared_policy_init(&p->policy, NULL);
+
 	return &p->vfs_inode;
 }
 
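
The mmap path above now rejects a page offset whose byte value is negative as a loff_t and a pgoff-plus-length sum that wraps. A stand-alone model of the two checks; the kernel builds with -fno-strict-overflow and tests `len < vma_len` after the addition, whereas the portable version below checks before adding:

	#include <limits.h>
	#include <stdio.h>

	/* Returns 0 when (pgoff << page_shift) + range fits in a non-negative
	 * long long, -1 otherwise.  Illustrative only. */
	static int check_mmap_range(unsigned long long pgoff,
				    unsigned long long range,
				    unsigned int page_shift)
	{
		unsigned long long off = pgoff << page_shift;

		if (off > LLONG_MAX)		/* negative when read as loff_t */
			return -1;
		if (range > (unsigned long long)LLONG_MAX - off)
			return -1;		/* sum would wrap */
		return 0;
	}

	int main(void)
	{
		printf("%d\n", check_mmap_range(1ULL << 51, 4096, 12));	/* -1 */
		printf("%d\n", check_mmap_range(16, 4096, 12));		/* 0 */
		return 0;
	}
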
diff --git a/fs/namei.c b/fs/namei.c
index d41fab78798b..19dcf62133cc 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -2145,6 +2145,9 @@ static const char *path_init(struct nameidata *nd, unsigned flags)
 	int retval = 0;
 	const char *s = nd->name->name;
 
+	if (!*s)
+		flags &= ~LOOKUP_RCU;
+
 	nd->last_type = LAST_ROOT; /* if there are only slashes... */
 	nd->flags = flags | LOOKUP_JUMPED | LOOKUP_PARENT;
 	nd->depth = 0;
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index fb499a3f21b5..f92ba8d6c556 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -2055,7 +2055,7 @@ int nfs_rename(struct inode *old_dir, struct dentry *old_dentry,
 {
 	struct inode *old_inode = d_inode(old_dentry);
 	struct inode *new_inode = d_inode(new_dentry);
-	struct dentry *dentry = NULL, *rehash = NULL;
+	struct dentry *dentry = NULL;
 	struct rpc_task *task;
 	int error = -EBUSY;
 
@@ -2078,10 +2078,8 @@ int nfs_rename(struct inode *old_dir, struct dentry *old_dentry,
 	 * To prevent any new references to the target during the
 	 * rename, we unhash the dentry in advance.
 	 */
-	if (!d_unhashed(new_dentry)) {
+	if (!d_unhashed(new_dentry))
 		d_drop(new_dentry);
-		rehash = new_dentry;
-	}
 
 	if (d_count(new_dentry) > 2) {
 		int err;
@@ -2098,7 +2096,6 @@ int nfs_rename(struct inode *old_dir, struct dentry *old_dentry,
 			goto out;
 
 		new_dentry = dentry;
-		rehash = NULL;
 		new_inode = NULL;
 	}
 }
@@ -2119,8 +2116,6 @@ int nfs_rename(struct inode *old_dir, struct dentry *old_dentry,
 	error = task->tk_status;
 	rpc_put_task(task);
 out:
-	if (rehash)
-		d_rehash(rehash);
 	trace_nfs_rename_exit(old_dir, old_dentry,
 			new_dir, new_dentry, error);
 	/* new dentry created? */
diff --git a/fs/nfs/filelayout/filelayout.c b/fs/nfs/filelayout/filelayout.c
index 44347f4bdc15..acd30baca461 100644
--- a/fs/nfs/filelayout/filelayout.c
+++ b/fs/nfs/filelayout/filelayout.c
@@ -202,10 +202,10 @@ static int filelayout_async_handle_error(struct rpc_task *task,
 					task->tk_status);
 		nfs4_mark_deviceid_unavailable(devid);
 		pnfs_error_mark_layout_for_return(inode, lseg);
-		pnfs_set_lo_fail(lseg);
 		rpc_wake_up(&tbl->slot_tbl_waitq);
 		/* fall through */
 	default:
+		pnfs_set_lo_fail(lseg);
 reset:
 		dprintk("%s Retry through MDS. Error %d\n", __func__,
 			task->tk_status);
@@ -560,6 +560,50 @@ filelayout_write_pagelist(struct nfs_pgio_header *hdr, int sync)
 	return PNFS_ATTEMPTED;
 }
 
+static int
+filelayout_check_deviceid(struct pnfs_layout_hdr *lo,
+			  struct nfs4_filelayout_segment *fl,
+			  gfp_t gfp_flags)
+{
+	struct nfs4_deviceid_node *d;
+	struct nfs4_file_layout_dsaddr *dsaddr;
+	int status = -EINVAL;
+
+	/* find and reference the deviceid */
+	d = nfs4_find_get_deviceid(NFS_SERVER(lo->plh_inode), &fl->deviceid,
+			lo->plh_lc_cred, gfp_flags);
+	if (d == NULL)
+		goto out;
+
+	dsaddr = container_of(d, struct nfs4_file_layout_dsaddr, id_node);
+	/* Found deviceid is unavailable */
+	if (filelayout_test_devid_unavailable(&dsaddr->id_node))
+		goto out_put;
+
+	fl->dsaddr = dsaddr;
+
+	if (fl->first_stripe_index >= dsaddr->stripe_count) {
+		dprintk("%s Bad first_stripe_index %u\n",
+				__func__, fl->first_stripe_index);
+		goto out_put;
+	}
+
+	if ((fl->stripe_type == STRIPE_SPARSE &&
+	    fl->num_fh > 1 && fl->num_fh != dsaddr->ds_num) ||
+	    (fl->stripe_type == STRIPE_DENSE &&
+	    fl->num_fh != dsaddr->stripe_count)) {
+		dprintk("%s num_fh %u not valid for given packing\n",
+			__func__, fl->num_fh);
+		goto out_put;
+	}
+	status = 0;
+out:
+	return status;
+out_put:
+	nfs4_fl_put_deviceid(dsaddr);
+	goto out;
+}
+
 /*
  * filelayout_check_layout()
  *
@@ -572,11 +616,8 @@ static int
 filelayout_check_layout(struct pnfs_layout_hdr *lo,
 			struct nfs4_filelayout_segment *fl,
 			struct nfs4_layoutget_res *lgr,
-			struct nfs4_deviceid *id,
 			gfp_t gfp_flags)
 {
-	struct nfs4_deviceid_node *d;
-	struct nfs4_file_layout_dsaddr *dsaddr;
 	int status = -EINVAL;
 
 	dprintk("--> %s\n", __func__);
@@ -601,41 +642,10 @@ filelayout_check_layout(struct pnfs_layout_hdr *lo,
 		goto out;
 	}
 
-	/* find and reference the deviceid */
-	d = nfs4_find_get_deviceid(NFS_SERVER(lo->plh_inode), id,
-			lo->plh_lc_cred, gfp_flags);
-	if (d == NULL)
-		goto out;
-
-	dsaddr = container_of(d, struct nfs4_file_layout_dsaddr, id_node);
-	/* Found deviceid is unavailable */
-	if (filelayout_test_devid_unavailable(&dsaddr->id_node))
-		goto out_put;
-
-	fl->dsaddr = dsaddr;
-
-	if (fl->first_stripe_index >= dsaddr->stripe_count) {
-		dprintk("%s Bad first_stripe_index %u\n",
-				__func__, fl->first_stripe_index);
-		goto out_put;
-	}
-
-	if ((fl->stripe_type == STRIPE_SPARSE &&
-	    fl->num_fh > 1 && fl->num_fh != dsaddr->ds_num) ||
-	    (fl->stripe_type == STRIPE_DENSE &&
-	    fl->num_fh != dsaddr->stripe_count)) {
-		dprintk("%s num_fh %u not valid for given packing\n",
-			__func__, fl->num_fh);
-		goto out_put;
-	}
-
 	status = 0;
 out:
 	dprintk("--> %s returns %d\n", __func__, status);
 	return status;
-out_put:
-	nfs4_fl_put_deviceid(dsaddr);
-	goto out;
 }
 
 static void _filelayout_free_lseg(struct nfs4_filelayout_segment *fl)
@@ -657,7 +667,6 @@ static int
 filelayout_decode_layout(struct pnfs_layout_hdr *flo,
 			 struct nfs4_filelayout_segment *fl,
 			 struct nfs4_layoutget_res *lgr,
-			 struct nfs4_deviceid *id,
 			 gfp_t gfp_flags)
 {
 	struct xdr_stream stream;
@@ -682,9 +691,9 @@ filelayout_decode_layout(struct pnfs_layout_hdr *flo,
 	if (unlikely(!p))
 		goto out_err;
 
-	memcpy(id, p, sizeof(*id));
+	memcpy(&fl->deviceid, p, sizeof(fl->deviceid));
 	p += XDR_QUADLEN(NFS4_DEVICEID4_SIZE);
-	nfs4_print_deviceid(id);
+	nfs4_print_deviceid(&fl->deviceid);
 
 	nfl_util = be32_to_cpup(p++);
 	if (nfl_util & NFL4_UFLG_COMMIT_THRU_MDS)
@@ -831,15 +840,14 @@ filelayout_alloc_lseg(struct pnfs_layout_hdr *layoutid,
 {
 	struct nfs4_filelayout_segment *fl;
 	int rc;
-	struct nfs4_deviceid id;
 
 	dprintk("--> %s\n", __func__);
 	fl = kzalloc(sizeof(*fl), gfp_flags);
 	if (!fl)
 		return NULL;
 
-	rc = filelayout_decode_layout(layoutid, fl, lgr, &id, gfp_flags);
-	if (rc != 0 || filelayout_check_layout(layoutid, fl, lgr, &id, gfp_flags)) {
+	rc = filelayout_decode_layout(layoutid, fl, lgr, gfp_flags);
+	if (rc != 0 || filelayout_check_layout(layoutid, fl, lgr, gfp_flags)) {
 		_filelayout_free_lseg(fl);
 		return NULL;
 	}
@@ -888,18 +896,51 @@ filelayout_pg_test(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
 	return min(stripe_unit - (unsigned int)stripe_offset, size);
 }
 
+static struct pnfs_layout_segment *
+fl_pnfs_update_layout(struct inode *ino,
+		      struct nfs_open_context *ctx,
+		      loff_t pos,
+		      u64 count,
+		      enum pnfs_iomode iomode,
+		      bool strict_iomode,
+		      gfp_t gfp_flags)
+{
+	struct pnfs_layout_segment *lseg = NULL;
+	struct pnfs_layout_hdr *lo;
+	struct nfs4_filelayout_segment *fl;
+	int status;
+
+	lseg = pnfs_update_layout(ino, ctx, pos, count, iomode, strict_iomode,
+				  gfp_flags);
+	if (!lseg)
+		lseg = ERR_PTR(-ENOMEM);
+	if (IS_ERR(lseg))
+		goto out;
+
+	lo = NFS_I(ino)->layout;
+	fl = FILELAYOUT_LSEG(lseg);
+
+	status = filelayout_check_deviceid(lo, fl, gfp_flags);
+	if (status)
+		lseg = ERR_PTR(status);
+out:
+	if (IS_ERR(lseg))
+		pnfs_put_lseg(lseg);
+	return lseg;
+}
+
 static void
 filelayout_pg_init_read(struct nfs_pageio_descriptor *pgio,
 			struct nfs_page *req)
 {
 	if (!pgio->pg_lseg) {
-		pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
+		pgio->pg_lseg = fl_pnfs_update_layout(pgio->pg_inode,
 					   req->wb_context,
 					   0,
 					   NFS4_MAX_UINT64,
 					   IOMODE_READ,
 					   false,
 					   GFP_KERNEL);
 		if (IS_ERR(pgio->pg_lseg)) {
 			pgio->pg_error = PTR_ERR(pgio->pg_lseg);
 			pgio->pg_lseg = NULL;
@@ -919,13 +960,13 @@ filelayout_pg_init_write(struct nfs_pageio_descriptor *pgio,
 	int status;
 
 	if (!pgio->pg_lseg) {
-		pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
+		pgio->pg_lseg = fl_pnfs_update_layout(pgio->pg_inode,
 					   req->wb_context,
 					   0,
 					   NFS4_MAX_UINT64,
 					   IOMODE_RW,
 					   false,
 					   GFP_NOFS);
 		if (IS_ERR(pgio->pg_lseg)) {
 			pgio->pg_error = PTR_ERR(pgio->pg_lseg);
 			pgio->pg_lseg = NULL;
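
The net effect of the filelayout changes: the deviceid is copied into the layout segment at decode time and only resolved to a data-server address on the I/O path, via fl_pnfs_update_layout(), so a layout whose device turns out to be bad falls back to the MDS instead of being cached half-initialized. A reduced model of "store the key, resolve the object lazily" (all names illustrative):

	#include <stddef.h>

	struct device { int unavailable; };	/* stand-in for a resolved DS */

	struct segment {
		unsigned long device_key;	/* stable key kept from decode */
		struct device *dev;		/* resolved lazily; NULL until used */
	};

	static struct device example_dev;

	/* Models nfs4_find_get_deviceid(): key -> object. */
	static struct device *resolve_device(unsigned long key)
	{
		(void)key;
		return &example_dev;
	}

	/* Runs on the I/O path, as filelayout_check_deviceid() does above. */
	static int segment_prepare_io(struct segment *seg)
	{
		if (!seg->dev) {
			struct device *d = resolve_device(seg->device_key);

			if (!d || d->unavailable)
				return -1;	/* caller retries through the MDS */
			seg->dev = d;
		}
		return 0;
	}

	int main(void)
	{
		struct segment seg = { .device_key = 42, .dev = NULL };

		return segment_prepare_io(&seg);
	}
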
diff --git a/fs/nfs/filelayout/filelayout.h b/fs/nfs/filelayout/filelayout.h
index 2896cb833a11..79323b5dab0c 100644
--- a/fs/nfs/filelayout/filelayout.h
+++ b/fs/nfs/filelayout/filelayout.h
@@ -55,15 +55,16 @@ struct nfs4_file_layout_dsaddr {
 };
 
 struct nfs4_filelayout_segment {
 	struct pnfs_layout_segment generic_hdr;
 	u32 stripe_type;
 	u32 commit_through_mds;
 	u32 stripe_unit;
 	u32 first_stripe_index;
 	u64 pattern_offset;
+	struct nfs4_deviceid deviceid;
 	struct nfs4_file_layout_dsaddr *dsaddr; /* Point to GETDEVINFO data */
 	unsigned int num_fh;
 	struct nfs_fh **fh_array;
 };
 
 struct nfs4_filelayout {
diff --git a/fs/nfs/flexfilelayout/flexfilelayoutdev.c b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
index 85fde93dff77..457cfeb1d5c1 100644
--- a/fs/nfs/flexfilelayout/flexfilelayoutdev.c
+++ b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
@@ -208,6 +208,10 @@ static bool ff_layout_mirror_valid(struct pnfs_layout_segment *lseg,
 		} else
 			goto outerr;
 	}
+
+	if (IS_ERR(mirror->mirror_ds))
+		goto outerr;
+
 	if (mirror->mirror_ds->ds == NULL) {
 		struct nfs4_deviceid_node *devid;
 		devid = &mirror->mirror_ds->id_node;
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index c780d98035cc..201ca3f2c4ba 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -2442,17 +2442,14 @@ static void nfs41_check_delegation_stateid(struct nfs4_state *state)
 	}
 
 	nfs4_stateid_copy(&stateid, &delegation->stateid);
-	if (test_bit(NFS_DELEGATION_REVOKED, &delegation->flags)) {
+	if (test_bit(NFS_DELEGATION_REVOKED, &delegation->flags) ||
+	    !test_and_clear_bit(NFS_DELEGATION_TEST_EXPIRED,
+				&delegation->flags)) {
 		rcu_read_unlock();
 		nfs_finish_clear_delegation_stateid(state, &stateid);
 		return;
 	}
 
-	if (!test_and_clear_bit(NFS_DELEGATION_TEST_EXPIRED, &delegation->flags)) {
-		rcu_read_unlock();
-		return;
-	}
-
 	cred = get_rpccred(delegation->cred);
 	rcu_read_unlock();
 	status = nfs41_test_and_free_expired_stateid(server, &stateid, cred);
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index 73e75ac90525..8bf8f667a8cf 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -538,13 +538,21 @@ out_free:
 
 static ssize_t
 nfsd_print_version_support(char *buf, int remaining, const char *sep,
-		unsigned vers, unsigned minor)
+		unsigned vers, int minor)
 {
-	const char *format = (minor == 0) ? "%s%c%u" : "%s%c%u.%u";
+	const char *format = minor < 0 ? "%s%c%u" : "%s%c%u.%u";
 	bool supported = !!nfsd_vers(vers, NFSD_TEST);
 
-	if (vers == 4 && !nfsd_minorversion(minor, NFSD_TEST))
+	if (vers == 4 && minor >= 0 &&
+	    !nfsd_minorversion(minor, NFSD_TEST))
 		supported = false;
+	if (minor == 0 && supported)
+		/*
+		 * special case for backward compatability.
+		 * +4.0 is never reported, it is implied by
+		 * +4, unless -4.0 is present.
+		 */
+		return 0;
 	return snprintf(buf, remaining, format, sep,
 			supported ? '+' : '-', vers, minor);
 }
@@ -554,7 +562,6 @@ static ssize_t __write_versions(struct file *file, char *buf, size_t size)
 	char *mesg = buf;
 	char *vers, *minorp, sign;
 	int len, num, remaining;
-	unsigned minor;
 	ssize_t tlen = 0;
 	char *sep;
 	struct nfsd_net *nn = net_generic(netns(file), nfsd_net_id);
@@ -575,6 +582,7 @@ static ssize_t __write_versions(struct file *file, char *buf, size_t size)
 	if (len <= 0) return -EINVAL;
 	do {
 		enum vers_op cmd;
+		unsigned minor;
 		sign = *vers;
 		if (sign == '+' || sign == '-')
 			num = simple_strtol((vers+1), &minorp, 0);
@@ -585,8 +593,8 @@ static ssize_t __write_versions(struct file *file, char *buf, size_t size)
 				return -EINVAL;
 			if (kstrtouint(minorp+1, 0, &minor) < 0)
 				return -EINVAL;
-		} else
-			minor = 0;
+		}
+
 		cmd = sign == '-' ? NFSD_CLEAR : NFSD_SET;
 		switch(num) {
 		case 2:
@@ -594,8 +602,20 @@ static ssize_t __write_versions(struct file *file, char *buf, size_t size)
 			nfsd_vers(num, cmd);
 			break;
 		case 4:
-			if (nfsd_minorversion(minor, cmd) >= 0)
-				break;
+			if (*minorp == '.') {
+				if (nfsd_minorversion(minor, cmd) < 0)
+					return -EINVAL;
+			} else if ((cmd == NFSD_SET) != nfsd_vers(num, NFSD_TEST)) {
+				/*
+				 * Either we have +4 and no minors are enabled,
+				 * or we have -4 and at least one minor is enabled.
+				 * In either case, propagate 'cmd' to all minors.
+				 */
+				minor = 0;
+				while (nfsd_minorversion(minor, cmd) >= 0)
+					minor++;
+			}
+			break;
 		default:
 			return -EINVAL;
 		}
@@ -612,9 +632,11 @@ static ssize_t __write_versions(struct file *file, char *buf, size_t size)
 	sep = "";
 	remaining = SIMPLE_TRANSACTION_LIMIT;
 	for (num=2 ; num <= 4 ; num++) {
+		int minor;
 		if (!nfsd_vers(num, NFSD_AVAIL))
 			continue;
-		minor = 0;
+
+		minor = -1;
 		do {
 			len = nfsd_print_version_support(buf, remaining,
 					sep, num, minor);
@@ -624,7 +646,8 @@ static ssize_t __write_versions(struct file *file, char *buf, size_t size)
 			buf += len;
 			tlen += len;
 			minor++;
-			sep = " ";
+			if (len)
+				sep = " ";
 		} while (num == 4 && minor <= NFSD_SUPPORTED_MINOR_VERSION);
 	}
 out:
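
The rewritten reporting loop uses minor = -1 to mean "the major version as a whole", so NFSv4 is printed once as +4/-4 followed by explicit minor-version entries, and +4.0 is suppressed because it is implied by +4. A compact model of that encoding (supported() is a stand-in predicate, not the nfsd API):

	#include <stdbool.h>
	#include <stdio.h>

	#define MAX_MINOR 2

	static bool supported(unsigned vers, int minor)
	{
		(void)vers;
		return minor != 0;	/* example: 4.0 off, 4.1/4.2 on */
	}

	static int print_version(char *buf, size_t n, const char *sep,
				 unsigned vers, int minor)
	{
		/* minor < 0 selects the major-version-only format */
		const char *fmt = minor < 0 ? "%s%c%u" : "%s%c%u.%u";
		bool sup = supported(vers, minor);

		if (minor == 0 && sup)
			return 0;	/* +4.0 is implied by +4 */
		return snprintf(buf, n, fmt, sep, sup ? '+' : '-', vers, minor);
	}

	int main(void)
	{
		char buf[64];
		const char *sep = "";
		size_t off = 0;

		for (int minor = -1; minor <= MAX_MINOR; minor++) {
			int len = print_version(buf + off, sizeof(buf) - off,
						sep, 4, minor);
			off += len;
			if (len)
				sep = " ";
		}
		puts(buf);		/* prints "+4 -4.0 +4.1 +4.2" */
		return 0;
	}
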
diff --git a/fs/nfsd/nfsproc.c b/fs/nfsd/nfsproc.c
index fa82b7707e85..03a7e9da4da0 100644
--- a/fs/nfsd/nfsproc.c
+++ b/fs/nfsd/nfsproc.c
@@ -786,6 +786,7 @@ nfserrno (int errno)
 	{ nfserr_serverfault, -ESERVERFAULT },
 	{ nfserr_serverfault, -ENFILE },
 	{ nfserr_io, -EUCLEAN },
+	{ nfserr_perm, -ENOKEY },
 	};
 	int i;
 
diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
index 786a4a2cb2d7..31e1f9593457 100644
--- a/fs/nfsd/nfssvc.c
+++ b/fs/nfsd/nfssvc.c
@@ -167,7 +167,8 @@ nfsd_adjust_nfsd_versions4(void)
 
 int nfsd_minorversion(u32 minorversion, enum vers_op change)
 {
-	if (minorversion > NFSD_SUPPORTED_MINOR_VERSION)
+	if (minorversion > NFSD_SUPPORTED_MINOR_VERSION &&
+	    change != NFSD_AVAIL)
 		return -1;
 	switch(change) {
 	case NFSD_SET:
@@ -415,23 +416,20 @@ static void nfsd_last_thread(struct svc_serv *serv, struct net *net)
 
 void nfsd_reset_versions(void)
 {
-	int found_one = 0;
 	int i;
 
-	for (i = NFSD_MINVERS; i < NFSD_NRVERS; i++) {
-		if (nfsd_program.pg_vers[i])
-			found_one = 1;
-	}
+	for (i = 0; i < NFSD_NRVERS; i++)
+		if (nfsd_vers(i, NFSD_TEST))
+			return;
 
-	if (!found_one) {
-		for (i = NFSD_MINVERS; i < NFSD_NRVERS; i++)
-			nfsd_program.pg_vers[i] = nfsd_version[i];
-#if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
-		for (i = NFSD_ACL_MINVERS; i < NFSD_ACL_NRVERS; i++)
-			nfsd_acl_program.pg_vers[i] =
-				nfsd_acl_version[i];
-#endif
-	}
+	for (i = 0; i < NFSD_NRVERS; i++)
+		if (i != 4)
+			nfsd_vers(i, NFSD_SET);
+		else {
+			int minor = 0;
+			while (nfsd_minorversion(minor, NFSD_SET) >= 0)
+				minor++;
+		}
 }
 
 /*
diff --git a/fs/orangefs/devorangefs-req.c b/fs/orangefs/devorangefs-req.c
index c4ab6fdf17a0..e1534c9bab16 100644
--- a/fs/orangefs/devorangefs-req.c
+++ b/fs/orangefs/devorangefs-req.c
@@ -208,14 +208,19 @@ restart:
 			continue;
 		/*
 		 * Skip ops whose filesystem we don't know about unless
-		 * it is being mounted.
+		 * it is being mounted or unmounted.  It is possible for
+		 * a filesystem we don't know about to be unmounted if
+		 * it fails to mount in the kernel after userspace has
+		 * been sent the mount request.
 		 */
 		/* XXX: is there a better way to detect this? */
 		} else if (ret == -1 &&
 			   !(op->upcall.type ==
 				ORANGEFS_VFS_OP_FS_MOUNT ||
 			     op->upcall.type ==
-				ORANGEFS_VFS_OP_GETATTR)) {
+				ORANGEFS_VFS_OP_GETATTR ||
+			     op->upcall.type ==
+				ORANGEFS_VFS_OP_FS_UMOUNT)) {
 			gossip_debug(GOSSIP_DEV_DEBUG,
 			    "orangefs: skipping op tag %llu %s\n",
 			    llu(op->tag), get_opname_string(op));
diff --git a/fs/orangefs/orangefs-kernel.h b/fs/orangefs/orangefs-kernel.h
index 5e48a0be9761..8afac46fcc87 100644
--- a/fs/orangefs/orangefs-kernel.h
+++ b/fs/orangefs/orangefs-kernel.h
@@ -249,6 +249,7 @@ struct orangefs_sb_info_s {
 	char devname[ORANGEFS_MAX_SERVER_ADDR_LEN];
 	struct super_block *sb;
 	int mount_pending;
+	int no_list;
 	struct list_head list;
 };
 
diff --git a/fs/orangefs/super.c b/fs/orangefs/super.c
index 67c24351a67f..629d8c917fa6 100644
--- a/fs/orangefs/super.c
+++ b/fs/orangefs/super.c
@@ -263,8 +263,13 @@ int orangefs_remount(struct orangefs_sb_info_s *orangefs_sb)
 		if (!new_op)
 			return -ENOMEM;
 		new_op->upcall.req.features.features = 0;
-		ret = service_operation(new_op, "orangefs_features", 0);
-		orangefs_features = new_op->downcall.resp.features.features;
+		ret = service_operation(new_op, "orangefs_features",
+		    ORANGEFS_OP_PRIORITY | ORANGEFS_OP_NO_MUTEX);
+		if (!ret)
+			orangefs_features =
+			    new_op->downcall.resp.features.features;
+		else
+			orangefs_features = 0;
 		op_release(new_op);
 	} else {
 		orangefs_features = 0;
@@ -488,7 +493,7 @@ struct dentry *orangefs_mount(struct file_system_type *fst,
 
 	if (ret) {
 		d = ERR_PTR(ret);
-		goto free_op;
+		goto free_sb_and_op;
 	}
 
 	/*
@@ -514,6 +519,9 @@ struct dentry *orangefs_mount(struct file_system_type *fst,
 	spin_unlock(&orangefs_superblocks_lock);
 	op_release(new_op);
 
+	/* Must be removed from the list now. */
+	ORANGEFS_SB(sb)->no_list = 0;
+
 	if (orangefs_userspace_version >= 20906) {
 		new_op = op_alloc(ORANGEFS_VFS_OP_FEATURES);
 		if (!new_op)
@@ -528,6 +536,10 @@ struct dentry *orangefs_mount(struct file_system_type *fst,
 
 	return dget(sb->s_root);
 
+free_sb_and_op:
+	/* Will call orangefs_kill_sb with sb not in list. */
+	ORANGEFS_SB(sb)->no_list = 1;
+	deactivate_locked_super(sb);
 free_op:
 	gossip_err("orangefs_mount: mount request failed with %d\n", ret);
 	if (ret == -EINVAL) {
@@ -553,12 +565,14 @@ void orangefs_kill_sb(struct super_block *sb)
 	 */
 	orangefs_unmount_sb(sb);
 
-	/* remove the sb from our list of orangefs specific sb's */
-
-	spin_lock(&orangefs_superblocks_lock);
-	__list_del_entry(&ORANGEFS_SB(sb)->list);	/* not list_del_init */
-	ORANGEFS_SB(sb)->list.prev = NULL;
-	spin_unlock(&orangefs_superblocks_lock);
+	if (!ORANGEFS_SB(sb)->no_list) {
+		/* remove the sb from our list of orangefs specific sb's */
+		spin_lock(&orangefs_superblocks_lock);
+		/* not list_del_init */
+		__list_del_entry(&ORANGEFS_SB(sb)->list);
+		ORANGEFS_SB(sb)->list.prev = NULL;
+		spin_unlock(&orangefs_superblocks_lock);
+	}
 
 	/*
 	 * make sure that ORANGEFS_DEV_REMOUNT_ALL loop that might've seen us
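
The no_list flag threads one bit of state from the failure site in orangefs_mount() to orangefs_kill_sb(), telling teardown whether the superblock ever made it onto the orangefs superblock list. A reduced model of guarding cleanup with an "is it linked yet" flag (illustrative, not the orangefs structures):

	#include <stdbool.h>
	#include <stddef.h>

	struct sb_model {
		bool on_list;
		struct sb_model *next;
	};

	static struct sb_model *sb_list;

	static void sb_add(struct sb_model *sb)
	{
		sb->next = sb_list;
		sb_list = sb;
		sb->on_list = true;
	}

	/* Teardown must only unlink what was actually linked. */
	static void sb_kill(struct sb_model *sb)
	{
		if (!sb->on_list)
			return;
		for (struct sb_model **p = &sb_list; *p; p = &(*p)->next)
			if (*p == sb) {
				*p = sb->next;
				break;
			}
		sb->on_list = false;
	}

	int main(void)
	{
		struct sb_model sb = { 0 };

		sb_kill(&sb);	/* safe: never added, nothing to unlink */
		sb_add(&sb);
		sb_kill(&sb);	/* unlinks exactly once */
		return sb_list != NULL;
	}
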
diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
index 8f91ec66baa3..d04ea4349909 100644
--- a/fs/proc/proc_sysctl.c
+++ b/fs/proc/proc_sysctl.c
@@ -1074,6 +1074,7 @@ static int sysctl_check_table(const char *path, struct ctl_table *table)
 
 	if ((table->proc_handler == proc_dostring) ||
 	    (table->proc_handler == proc_dointvec) ||
+	    (table->proc_handler == proc_douintvec) ||
 	    (table->proc_handler == proc_dointvec_minmax) ||
 	    (table->proc_handler == proc_dointvec_jiffies) ||
 	    (table->proc_handler == proc_dointvec_userhz_jiffies) ||
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index f08bd31c1081..312578089544 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -900,7 +900,14 @@ static inline void clear_soft_dirty(struct vm_area_struct *vma,
 static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
 		unsigned long addr, pmd_t *pmdp)
 {
-	pmd_t pmd = pmdp_huge_get_and_clear(vma->vm_mm, addr, pmdp);
+	pmd_t pmd = *pmdp;
+
+	/* See comment in change_huge_pmd() */
+	pmdp_invalidate(vma, addr, pmdp);
+	if (pmd_dirty(*pmdp))
+		pmd = pmd_mkdirty(pmd);
+	if (pmd_young(*pmdp))
+		pmd = pmd_mkyoung(pmd);
 
 	pmd = pmd_wrprotect(pmd);
 	pmd = pmd_clear_soft_dirty(pmd);
diff --git a/fs/stat.c b/fs/stat.c
index fa0be59340cc..c6c963b2546b 100644
--- a/fs/stat.c
+++ b/fs/stat.c
@@ -130,9 +130,13 @@ EXPORT_SYMBOL(vfs_getattr);
 int vfs_statx_fd(unsigned int fd, struct kstat *stat,
 		 u32 request_mask, unsigned int query_flags)
 {
-	struct fd f = fdget_raw(fd);
+	struct fd f;
 	int error = -EBADF;
 
+	if (query_flags & ~KSTAT_QUERY_FLAGS)
+		return -EINVAL;
+
+	f = fdget_raw(fd);
 	if (f.file) {
 		error = vfs_getattr(&f.file->f_path, stat,
 				    request_mask, query_flags);
@@ -155,9 +159,6 @@ EXPORT_SYMBOL(vfs_statx_fd);
  * Additionally, the use of AT_SYMLINK_NOFOLLOW in flags will prevent a symlink
  * at the given name from being referenced.
  *
- * The caller must have preset stat->request_mask as for vfs_getattr().  The
- * flags are also used to load up stat->query_flags.
- *
  * 0 will be returned on success, and a -ve error code if unsuccessful.
  */
 int vfs_statx(int dfd, const char __user *filename, int flags,
@@ -509,46 +510,38 @@ SYSCALL_DEFINE4(fstatat64, int, dfd, const char __user *, filename,
 }
 #endif /* __ARCH_WANT_STAT64 || __ARCH_WANT_COMPAT_STAT64 */
 
-static inline int __put_timestamp(struct timespec *kts,
-				  struct statx_timestamp __user *uts)
-{
-	return (__put_user(kts->tv_sec,		&uts->tv_sec		) ||
-		__put_user(kts->tv_nsec,	&uts->tv_nsec		) ||
-		__put_user(0,			&uts->__reserved	));
-}
-
-/*
- * Set the statx results.
- */
-static long statx_set_result(struct kstat *stat, struct statx __user *buffer)
+static noinline_for_stack int
+cp_statx(const struct kstat *stat, struct statx __user *buffer)
 {
-	uid_t uid = from_kuid_munged(current_user_ns(), stat->uid);
-	gid_t gid = from_kgid_munged(current_user_ns(), stat->gid);
-
-	if (__put_user(stat->result_mask,	&buffer->stx_mask	) ||
-	    __put_user(stat->mode,		&buffer->stx_mode	) ||
-	    __clear_user(&buffer->__spare0, sizeof(buffer->__spare0))	||
-	    __put_user(stat->nlink,		&buffer->stx_nlink	) ||
-	    __put_user(uid,			&buffer->stx_uid	) ||
-	    __put_user(gid,			&buffer->stx_gid	) ||
-	    __put_user(stat->attributes,	&buffer->stx_attributes	) ||
-	    __put_user(stat->blksize,		&buffer->stx_blksize	) ||
-	    __put_user(MAJOR(stat->rdev),	&buffer->stx_rdev_major	) ||
-	    __put_user(MINOR(stat->rdev),	&buffer->stx_rdev_minor	) ||
-	    __put_user(MAJOR(stat->dev),	&buffer->stx_dev_major	) ||
-	    __put_user(MINOR(stat->dev),	&buffer->stx_dev_minor	) ||
-	    __put_timestamp(&stat->atime,	&buffer->stx_atime	) ||
-	    __put_timestamp(&stat->btime,	&buffer->stx_btime	) ||
-	    __put_timestamp(&stat->ctime,	&buffer->stx_ctime	) ||
-	    __put_timestamp(&stat->mtime,	&buffer->stx_mtime	) ||
-	    __put_user(stat->ino,		&buffer->stx_ino	) ||
-	    __put_user(stat->size,		&buffer->stx_size	) ||
-	    __put_user(stat->blocks,		&buffer->stx_blocks	) ||
-	    __clear_user(&buffer->__spare1, sizeof(buffer->__spare1))	||
-	    __clear_user(&buffer->__spare2, sizeof(buffer->__spare2)))
-		return -EFAULT;
-
-	return 0;
+	struct statx tmp;
+
+	memset(&tmp, 0, sizeof(tmp));
+
+	tmp.stx_mask = stat->result_mask;
+	tmp.stx_blksize = stat->blksize;
+	tmp.stx_attributes = stat->attributes;
+	tmp.stx_nlink = stat->nlink;
+	tmp.stx_uid = from_kuid_munged(current_user_ns(), stat->uid);
+	tmp.stx_gid = from_kgid_munged(current_user_ns(), stat->gid);
+	tmp.stx_mode = stat->mode;
+	tmp.stx_ino = stat->ino;
+	tmp.stx_size = stat->size;
+	tmp.stx_blocks = stat->blocks;
+	tmp.stx_attributes_mask = stat->attributes_mask;
+	tmp.stx_atime.tv_sec = stat->atime.tv_sec;
+	tmp.stx_atime.tv_nsec = stat->atime.tv_nsec;
+	tmp.stx_btime.tv_sec = stat->btime.tv_sec;
+	tmp.stx_btime.tv_nsec = stat->btime.tv_nsec;
+	tmp.stx_ctime.tv_sec = stat->ctime.tv_sec;
+	tmp.stx_ctime.tv_nsec = stat->ctime.tv_nsec;
+	tmp.stx_mtime.tv_sec = stat->mtime.tv_sec;
+	tmp.stx_mtime.tv_nsec = stat->mtime.tv_nsec;
+	tmp.stx_rdev_major = MAJOR(stat->rdev);
+	tmp.stx_rdev_minor = MINOR(stat->rdev);
+	tmp.stx_dev_major = MAJOR(stat->dev);
+	tmp.stx_dev_minor = MINOR(stat->dev);
+
+	return copy_to_user(buffer, &tmp, sizeof(tmp)) ? -EFAULT : 0;
 }
 
 /**
@@ -570,10 +563,10 @@ SYSCALL_DEFINE5(statx,
 	struct kstat stat;
 	int error;
 
+	if (mask & STATX__RESERVED)
+		return -EINVAL;
 	if ((flags & AT_STATX_SYNC_TYPE) == AT_STATX_SYNC_TYPE)
 		return -EINVAL;
-	if (!access_ok(VERIFY_WRITE, buffer, sizeof(*buffer)))
-		return -EFAULT;
 
 	if (filename)
 		error = vfs_statx(dfd, filename, flags, &stat, mask);
@@ -581,7 +574,8 @@ SYSCALL_DEFINE5(statx,
 		error = vfs_statx_fd(dfd, &stat, mask, flags);
 	if (error)
 		return error;
-	return statx_set_result(&stat, buffer);
+
+	return cp_statx(&stat, buffer);
 }
 
 /* Caller is here responsible for sufficient locking (ie. inode->i_lock) */
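
cp_statx() above replaces a long chain of __put_user() calls with "fill a zeroed local struct, then one copy_to_user()": a single user-access window, no reliance on the access_ok() that the syscall also drops, and reserved fields guaranteed to read as zero. The same shape in ordinary C, with the user buffer modeled as a caller-supplied pointer:

	#include <string.h>

	struct stx_model {		/* tiny stand-in for struct statx */
		unsigned int mask;
		unsigned long long size;
		unsigned long long blocks;
		unsigned int reserved[4];
	};

	/* Zero a local copy, fill what we have, publish it in one copy. */
	static void fill_stx(struct stx_model *ubuf, unsigned long long size,
			     unsigned long long blocks)
	{
		struct stx_model tmp;

		memset(&tmp, 0, sizeof(tmp));
		tmp.mask = 0x3;
		tmp.size = size;
		tmp.blocks = blocks;
		memcpy(ubuf, &tmp, sizeof(tmp));  /* one copy_to_user() in-kernel */
	}

	int main(void)
	{
		struct stx_model out;

		fill_stx(&out, 4096, 8);
		return out.reserved[0];	/* always 0 thanks to the memset */
	}
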
diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
index b803213d1307..39c75a86c67f 100644
--- a/fs/sysfs/file.c
+++ b/fs/sysfs/file.c
@@ -108,7 +108,7 @@ static ssize_t sysfs_kf_read(struct kernfs_open_file *of, char *buf,
 {
 	const struct sysfs_ops *ops = sysfs_file_ops(of->kn);
 	struct kobject *kobj = of->kn->parent->priv;
-	size_t len;
+	ssize_t len;
 
 	/*
 	 * If buf != of->prealloc_buf, we don't know how
@@ -117,13 +117,15 @@ static ssize_t sysfs_kf_read(struct kernfs_open_file *of, char *buf,
 	if (WARN_ON_ONCE(buf != of->prealloc_buf))
 		return 0;
 	len = ops->show(kobj, of->kn->priv, buf);
+	if (len < 0)
+		return len;
 	if (pos) {
 		if (len <= pos)
 			return 0;
 		len -= pos;
 		memmove(buf, buf + pos, len);
 	}
-	return min(count, len);
+	return min_t(ssize_t, count, len);
 }
 
 /* kernfs write callback for regular sysfs files */
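
The sysfs fix works because a negative ->show() return stored in a size_t becomes a huge positive length and sails past the later bounds checks; with ssize_t it can be tested and propagated. A short demonstration of the trap:

	#include <stdio.h>
	#include <sys/types.h>

	int main(void)
	{
		ssize_t ret = -5;		/* an -EIO style error code */
		size_t as_unsigned = (size_t)ret;

		printf("as size_t : %zu (looks like a valid, huge length)\n",
		       as_unsigned);
		if (ret < 0)			/* only testable with a signed type */
			printf("as ssize_t: error %zd propagated\n", ret);
		return 0;
	}
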
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index 1d227b0fcf49..f7555fc25877 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -1756,7 +1756,7 @@ static void userfaultfd_show_fdinfo(struct seq_file *m, struct file *f)
 	 *	protocols: aa:... bb:...
 	 */
 	seq_printf(m, "pending:\t%lu\ntotal:\t%lu\nAPI:\t%Lx:%x:%Lx\n",
-		   pending, total, UFFD_API, UFFD_API_FEATURES,
+		   pending, total, UFFD_API, ctx->features,
 		   UFFD_API_IOCTLS|UFFD_API_RANGE_IOCTLS);
 }
 #endif
diff --git a/fs/xfs/libxfs/xfs_dir2_priv.h b/fs/xfs/libxfs/xfs_dir2_priv.h
index eb00bc133bca..39f8604f764e 100644
--- a/fs/xfs/libxfs/xfs_dir2_priv.h
+++ b/fs/xfs/libxfs/xfs_dir2_priv.h
@@ -125,8 +125,7 @@ extern int xfs_dir2_sf_create(struct xfs_da_args *args, xfs_ino_t pino);
 extern int xfs_dir2_sf_lookup(struct xfs_da_args *args);
 extern int xfs_dir2_sf_removename(struct xfs_da_args *args);
 extern int xfs_dir2_sf_replace(struct xfs_da_args *args);
-extern int xfs_dir2_sf_verify(struct xfs_mount *mp, struct xfs_dir2_sf_hdr *sfp,
-		int size);
+extern int xfs_dir2_sf_verify(struct xfs_inode *ip);
 
 /* xfs_dir2_readdir.c */
 extern int xfs_readdir(struct xfs_inode *dp, struct dir_context *ctx,
diff --git a/fs/xfs/libxfs/xfs_dir2_sf.c b/fs/xfs/libxfs/xfs_dir2_sf.c
index 96b45cd6c63f..e84af093b2ab 100644
--- a/fs/xfs/libxfs/xfs_dir2_sf.c
+++ b/fs/xfs/libxfs/xfs_dir2_sf.c
@@ -632,36 +632,49 @@ xfs_dir2_sf_check(
632/* Verify the consistency of an inline directory. */ 632/* Verify the consistency of an inline directory. */
633int 633int
634xfs_dir2_sf_verify( 634xfs_dir2_sf_verify(
635 struct xfs_mount *mp, 635 struct xfs_inode *ip)
636 struct xfs_dir2_sf_hdr *sfp,
637 int size)
638{ 636{
637 struct xfs_mount *mp = ip->i_mount;
638 struct xfs_dir2_sf_hdr *sfp;
639 struct xfs_dir2_sf_entry *sfep; 639 struct xfs_dir2_sf_entry *sfep;
640 struct xfs_dir2_sf_entry *next_sfep; 640 struct xfs_dir2_sf_entry *next_sfep;
641 char *endp; 641 char *endp;
642 const struct xfs_dir_ops *dops; 642 const struct xfs_dir_ops *dops;
643 struct xfs_ifork *ifp;
643 xfs_ino_t ino; 644 xfs_ino_t ino;
644 int i; 645 int i;
645 int i8count; 646 int i8count;
646 int offset; 647 int offset;
648 int size;
649 int error;
647 __uint8_t filetype; 650 __uint8_t filetype;
648 651
652 ASSERT(ip->i_d.di_format == XFS_DINODE_FMT_LOCAL);
653 /*
654 * xfs_iread calls us before xfs_setup_inode sets up ip->d_ops,
655 * so we can only trust the mountpoint to have the right pointer.
656 */
649 dops = xfs_dir_get_ops(mp, NULL); 657 dops = xfs_dir_get_ops(mp, NULL);
650 658
659 ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
660 sfp = (struct xfs_dir2_sf_hdr *)ifp->if_u1.if_data;
661 size = ifp->if_bytes;
662
651 /* 663 /*
652 * Give up if the directory is way too short. 664 * Give up if the directory is way too short.
653 */ 665 */
654 XFS_WANT_CORRUPTED_RETURN(mp, size > 666 if (size <= offsetof(struct xfs_dir2_sf_hdr, parent) ||
655 offsetof(struct xfs_dir2_sf_hdr, parent)); 667 size < xfs_dir2_sf_hdr_size(sfp->i8count))
656 XFS_WANT_CORRUPTED_RETURN(mp, size >= 668 return -EFSCORRUPTED;
657 xfs_dir2_sf_hdr_size(sfp->i8count));
658 669
659 endp = (char *)sfp + size; 670 endp = (char *)sfp + size;
660 671
661 /* Check .. entry */ 672 /* Check .. entry */
662 ino = dops->sf_get_parent_ino(sfp); 673 ino = dops->sf_get_parent_ino(sfp);
663 i8count = ino > XFS_DIR2_MAX_SHORT_INUM; 674 i8count = ino > XFS_DIR2_MAX_SHORT_INUM;
664 XFS_WANT_CORRUPTED_RETURN(mp, !xfs_dir_ino_validate(mp, ino)); 675 error = xfs_dir_ino_validate(mp, ino);
676 if (error)
677 return error;
665 offset = dops->data_first_offset; 678 offset = dops->data_first_offset;
666 679
667 /* Check all reported entries */ 680 /* Check all reported entries */
@@ -672,12 +685,12 @@ xfs_dir2_sf_verify(
672 * Check the fixed-offset parts of the structure are 685 * Check the fixed-offset parts of the structure are
673 * within the data buffer. 686 * within the data buffer.
674 */ 687 */
675 XFS_WANT_CORRUPTED_RETURN(mp, 688 if (((char *)sfep + sizeof(*sfep)) >= endp)
676 ((char *)sfep + sizeof(*sfep)) < endp); 689 return -EFSCORRUPTED;
677 690
678 /* Don't allow names with known bad length. */ 691 /* Don't allow names with known bad length. */
679 XFS_WANT_CORRUPTED_RETURN(mp, sfep->namelen > 0); 692 if (sfep->namelen == 0)
680 XFS_WANT_CORRUPTED_RETURN(mp, sfep->namelen < MAXNAMELEN); 693 return -EFSCORRUPTED;
681 694
682 /* 695 /*
683 * Check that the variable-length part of the structure is 696 * Check that the variable-length part of the structure is
@@ -685,33 +698,39 @@ xfs_dir2_sf_verify(
685 * name component, so nextentry is an acceptable test. 698 * name component, so nextentry is an acceptable test.
686 */ 699 */
687 next_sfep = dops->sf_nextentry(sfp, sfep); 700 next_sfep = dops->sf_nextentry(sfp, sfep);
688 XFS_WANT_CORRUPTED_RETURN(mp, endp >= (char *)next_sfep); 701 if (endp < (char *)next_sfep)
702 return -EFSCORRUPTED;
689 703
690 /* Check that the offsets always increase. */ 704 /* Check that the offsets always increase. */
691 XFS_WANT_CORRUPTED_RETURN(mp, 705 if (xfs_dir2_sf_get_offset(sfep) < offset)
692 xfs_dir2_sf_get_offset(sfep) >= offset); 706 return -EFSCORRUPTED;
693 707
694 /* Check the inode number. */ 708 /* Check the inode number. */
695 ino = dops->sf_get_ino(sfp, sfep); 709 ino = dops->sf_get_ino(sfp, sfep);
696 i8count += ino > XFS_DIR2_MAX_SHORT_INUM; 710 i8count += ino > XFS_DIR2_MAX_SHORT_INUM;
697 XFS_WANT_CORRUPTED_RETURN(mp, !xfs_dir_ino_validate(mp, ino)); 711 error = xfs_dir_ino_validate(mp, ino);
712 if (error)
713 return error;
698 714
699 /* Check the file type. */ 715 /* Check the file type. */
700 filetype = dops->sf_get_ftype(sfep); 716 filetype = dops->sf_get_ftype(sfep);
701 XFS_WANT_CORRUPTED_RETURN(mp, filetype < XFS_DIR3_FT_MAX); 717 if (filetype >= XFS_DIR3_FT_MAX)
718 return -EFSCORRUPTED;
702 719
703 offset = xfs_dir2_sf_get_offset(sfep) + 720 offset = xfs_dir2_sf_get_offset(sfep) +
704 dops->data_entsize(sfep->namelen); 721 dops->data_entsize(sfep->namelen);
705 722
706 sfep = next_sfep; 723 sfep = next_sfep;
707 } 724 }
708 XFS_WANT_CORRUPTED_RETURN(mp, i8count == sfp->i8count); 725 if (i8count != sfp->i8count)
709 XFS_WANT_CORRUPTED_RETURN(mp, (void *)sfep == (void *)endp); 726 return -EFSCORRUPTED;
727 if ((void *)sfep != (void *)endp)
728 return -EFSCORRUPTED;
710 729
711 /* Make sure this whole thing ought to be in local format. */ 730 /* Make sure this whole thing ought to be in local format. */
712 XFS_WANT_CORRUPTED_RETURN(mp, offset + 731 if (offset + (sfp->count + 2) * (uint)sizeof(xfs_dir2_leaf_entry_t) +
713 (sfp->count + 2) * (uint)sizeof(xfs_dir2_leaf_entry_t) + 732 (uint)sizeof(xfs_dir2_block_tail_t) > mp->m_dir_geo->blksize)
714 (uint)sizeof(xfs_dir2_block_tail_t) <= mp->m_dir_geo->blksize); 733 return -EFSCORRUPTED;
715 734
716 return 0; 735 return 0;
717} 736}
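
The hunk above turns the XFS_WANT_CORRUPTED_RETURN() assertions into plain -EFSCORRUPTED returns, so the short-form directory verifier can run in paths that must report corruption rather than assert. A minimal userspace sketch of the same walk-and-bounds-check pattern; the packed sf_entry layout here is hypothetical, not the real on-disk XFS format:

#include <stddef.h>
#include <stdint.h>

#define EFSCORRUPTED 117	/* same value Linux uses */

struct sf_entry {		/* hypothetical packed entry */
	uint8_t namelen;
	char	name[];		/* namelen bytes follow */
};

/* Walk 'count' entries in [buf, buf + size) and bounds-check each one. */
static int sf_verify(const char *buf, size_t size, unsigned int count)
{
	const char *p = buf, *endp = buf + size;
	unsigned int i;

	for (i = 0; i < count; i++) {
		const struct sf_entry *e = (const void *)p;

		/* The fixed part must fit strictly inside the buffer. */
		if (p + sizeof(*e) >= endp)
			return -EFSCORRUPTED;
		/* Don't allow names with known bad length. */
		if (e->namelen == 0)
			return -EFSCORRUPTED;
		/* The variable-length part must fit too. */
		p += sizeof(*e) + e->namelen;
		if (p > endp)
			return -EFSCORRUPTED;
	}
	/* As above, the walk must consume the buffer exactly. */
	return p == endp ? 0 : -EFSCORRUPTED;
}
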
diff --git a/fs/xfs/libxfs/xfs_inode_fork.c b/fs/xfs/libxfs/xfs_inode_fork.c
index 9653e964eda4..8a37efe04de3 100644
--- a/fs/xfs/libxfs/xfs_inode_fork.c
+++ b/fs/xfs/libxfs/xfs_inode_fork.c
@@ -212,6 +212,16 @@ xfs_iformat_fork(
212 if (error) 212 if (error)
213 return error; 213 return error;
214 214
215 /* Check inline dir contents. */
216 if (S_ISDIR(VFS_I(ip)->i_mode) &&
217 dip->di_format == XFS_DINODE_FMT_LOCAL) {
218 error = xfs_dir2_sf_verify(ip);
219 if (error) {
220 xfs_idestroy_fork(ip, XFS_DATA_FORK);
221 return error;
222 }
223 }
224
215 if (xfs_is_reflink_inode(ip)) { 225 if (xfs_is_reflink_inode(ip)) {
216 ASSERT(ip->i_cowfp == NULL); 226 ASSERT(ip->i_cowfp == NULL);
217 xfs_ifork_init_cow(ip); 227 xfs_ifork_init_cow(ip);
@@ -322,8 +332,6 @@ xfs_iformat_local(
322 int whichfork, 332 int whichfork,
323 int size) 333 int size)
324{ 334{
325 int error;
326
327 /* 335 /*
328 * If the size is unreasonable, then something 336 * If the size is unreasonable, then something
329 * is wrong and we just bail out rather than crash in 337 * is wrong and we just bail out rather than crash in
@@ -339,14 +347,6 @@ xfs_iformat_local(
339 return -EFSCORRUPTED; 347 return -EFSCORRUPTED;
340 } 348 }
341 349
342 if (S_ISDIR(VFS_I(ip)->i_mode) && whichfork == XFS_DATA_FORK) {
343 error = xfs_dir2_sf_verify(ip->i_mount,
344 (struct xfs_dir2_sf_hdr *)XFS_DFORK_DPTR(dip),
345 size);
346 if (error)
347 return error;
348 }
349
350 xfs_init_local_fork(ip, whichfork, XFS_DFORK_PTR(dip, whichfork), size); 350 xfs_init_local_fork(ip, whichfork, XFS_DFORK_PTR(dip, whichfork), size);
351 return 0; 351 return 0;
352} 352}
@@ -867,7 +867,7 @@ xfs_iextents_copy(
867 * In these cases, the format always takes precedence, because the 867 * In these cases, the format always takes precedence, because the
868 * format indicates the current state of the fork. 868 * format indicates the current state of the fork.
869 */ 869 */
870int 870void
871xfs_iflush_fork( 871xfs_iflush_fork(
872 xfs_inode_t *ip, 872 xfs_inode_t *ip,
873 xfs_dinode_t *dip, 873 xfs_dinode_t *dip,
@@ -877,7 +877,6 @@ xfs_iflush_fork(
877 char *cp; 877 char *cp;
878 xfs_ifork_t *ifp; 878 xfs_ifork_t *ifp;
879 xfs_mount_t *mp; 879 xfs_mount_t *mp;
880 int error;
881 static const short brootflag[2] = 880 static const short brootflag[2] =
882 { XFS_ILOG_DBROOT, XFS_ILOG_ABROOT }; 881 { XFS_ILOG_DBROOT, XFS_ILOG_ABROOT };
883 static const short dataflag[2] = 882 static const short dataflag[2] =
@@ -886,7 +885,7 @@ xfs_iflush_fork(
886 { XFS_ILOG_DEXT, XFS_ILOG_AEXT }; 885 { XFS_ILOG_DEXT, XFS_ILOG_AEXT };
887 886
888 if (!iip) 887 if (!iip)
889 return 0; 888 return;
890 ifp = XFS_IFORK_PTR(ip, whichfork); 889 ifp = XFS_IFORK_PTR(ip, whichfork);
891 /* 890 /*
892 * This can happen if we gave up in iformat in an error path, 891 * This can happen if we gave up in iformat in an error path,
@@ -894,19 +893,12 @@ xfs_iflush_fork(
894 */ 893 */
895 if (!ifp) { 894 if (!ifp) {
896 ASSERT(whichfork == XFS_ATTR_FORK); 895 ASSERT(whichfork == XFS_ATTR_FORK);
897 return 0; 896 return;
898 } 897 }
899 cp = XFS_DFORK_PTR(dip, whichfork); 898 cp = XFS_DFORK_PTR(dip, whichfork);
900 mp = ip->i_mount; 899 mp = ip->i_mount;
901 switch (XFS_IFORK_FORMAT(ip, whichfork)) { 900 switch (XFS_IFORK_FORMAT(ip, whichfork)) {
902 case XFS_DINODE_FMT_LOCAL: 901 case XFS_DINODE_FMT_LOCAL:
903 if (S_ISDIR(VFS_I(ip)->i_mode) && whichfork == XFS_DATA_FORK) {
904 error = xfs_dir2_sf_verify(mp,
905 (struct xfs_dir2_sf_hdr *)ifp->if_u1.if_data,
906 ifp->if_bytes);
907 if (error)
908 return error;
909 }
910 if ((iip->ili_fields & dataflag[whichfork]) && 902 if ((iip->ili_fields & dataflag[whichfork]) &&
911 (ifp->if_bytes > 0)) { 903 (ifp->if_bytes > 0)) {
912 ASSERT(ifp->if_u1.if_data != NULL); 904 ASSERT(ifp->if_u1.if_data != NULL);
@@ -959,7 +951,6 @@ xfs_iflush_fork(
959 ASSERT(0); 951 ASSERT(0);
960 break; 952 break;
961 } 953 }
962 return 0;
963} 954}
964 955
965/* 956/*
diff --git a/fs/xfs/libxfs/xfs_inode_fork.h b/fs/xfs/libxfs/xfs_inode_fork.h
index 132dc59fdde6..7fb8365326d1 100644
--- a/fs/xfs/libxfs/xfs_inode_fork.h
+++ b/fs/xfs/libxfs/xfs_inode_fork.h
@@ -140,7 +140,7 @@ typedef struct xfs_ifork {
140struct xfs_ifork *xfs_iext_state_to_fork(struct xfs_inode *ip, int state); 140struct xfs_ifork *xfs_iext_state_to_fork(struct xfs_inode *ip, int state);
141 141
142int xfs_iformat_fork(struct xfs_inode *, struct xfs_dinode *); 142int xfs_iformat_fork(struct xfs_inode *, struct xfs_dinode *);
143int xfs_iflush_fork(struct xfs_inode *, struct xfs_dinode *, 143void xfs_iflush_fork(struct xfs_inode *, struct xfs_dinode *,
144 struct xfs_inode_log_item *, int); 144 struct xfs_inode_log_item *, int);
145void xfs_idestroy_fork(struct xfs_inode *, int); 145void xfs_idestroy_fork(struct xfs_inode *, int);
146void xfs_idata_realloc(struct xfs_inode *, int, int); 146void xfs_idata_realloc(struct xfs_inode *, int, int);
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index 8b75dcea5966..828532ce0adc 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -1311,8 +1311,16 @@ xfs_free_file_space(
1311 /* 1311 /*
1312 * Now that we've unmapped all full blocks we'll have to zero out any 1312 * Now that we've unmapped all full blocks we'll have to zero out any
1313 * partial block at the beginning and/or end. xfs_zero_range is 1313 * partial block at the beginning and/or end. xfs_zero_range is
1314 * smart enough to skip any holes, including those we just created. 1314 * smart enough to skip any holes, including those we just created,
1315 * but we must take care not to zero beyond EOF and enlarge i_size.
1315 */ 1316 */
1317
1318 if (offset >= XFS_ISIZE(ip))
1319 return 0;
1320
1321 if (offset + len > XFS_ISIZE(ip))
1322 len = XFS_ISIZE(ip) - offset;
1323
1316 return xfs_zero_range(ip, offset, len, NULL); 1324 return xfs_zero_range(ip, offset, len, NULL);
1317} 1325}
1318 1326
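
The two added checks clamp the zeroing range to EOF. A restated sketch of the clamp, with isize standing in for XFS_ISIZE(ip): punching at offset 8192 with len 8192 in a 10000-byte file trims len to 1808, so xfs_zero_range() never touches, and therefore never extends, anything past i_size.

/* Sketch of the clamp added above; isize stands in for XFS_ISIZE(ip). */
if (offset >= isize)
	return 0;		/* range lies entirely beyond EOF */
if (offset + len > isize)
	len = isize - offset;	/* e.g. 10000 - 8192 = 1808 */
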
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index c7fe2c2123ab..7605d8396596 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -50,6 +50,7 @@
50#include "xfs_log.h" 50#include "xfs_log.h"
51#include "xfs_bmap_btree.h" 51#include "xfs_bmap_btree.h"
52#include "xfs_reflink.h" 52#include "xfs_reflink.h"
53#include "xfs_dir2_priv.h"
53 54
54kmem_zone_t *xfs_inode_zone; 55kmem_zone_t *xfs_inode_zone;
55 56
@@ -3475,7 +3476,6 @@ xfs_iflush_int(
3475 struct xfs_inode_log_item *iip = ip->i_itemp; 3476 struct xfs_inode_log_item *iip = ip->i_itemp;
3476 struct xfs_dinode *dip; 3477 struct xfs_dinode *dip;
3477 struct xfs_mount *mp = ip->i_mount; 3478 struct xfs_mount *mp = ip->i_mount;
3478 int error;
3479 3479
3480 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)); 3480 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
3481 ASSERT(xfs_isiflocked(ip)); 3481 ASSERT(xfs_isiflocked(ip));
@@ -3547,6 +3547,12 @@ xfs_iflush_int(
3547 if (ip->i_d.di_version < 3) 3547 if (ip->i_d.di_version < 3)
3548 ip->i_d.di_flushiter++; 3548 ip->i_d.di_flushiter++;
3549 3549
3550 /* Check the inline directory data. */
3551 if (S_ISDIR(VFS_I(ip)->i_mode) &&
3552 ip->i_d.di_format == XFS_DINODE_FMT_LOCAL &&
3553 xfs_dir2_sf_verify(ip))
3554 goto corrupt_out;
3555
3550 /* 3556 /*
3551 * Copy the dirty parts of the inode into the on-disk inode. We always 3557 * Copy the dirty parts of the inode into the on-disk inode. We always
3552 * copy out the core of the inode, because if the inode is dirty at all 3558 * copy out the core of the inode, because if the inode is dirty at all
@@ -3558,14 +3564,9 @@ xfs_iflush_int(
3558 if (ip->i_d.di_flushiter == DI_MAX_FLUSH) 3564 if (ip->i_d.di_flushiter == DI_MAX_FLUSH)
3559 ip->i_d.di_flushiter = 0; 3565 ip->i_d.di_flushiter = 0;
3560 3566
3561 error = xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK); 3567 xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK);
3562 if (error) 3568 if (XFS_IFORK_Q(ip))
3563 return error; 3569 xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK);
3564 if (XFS_IFORK_Q(ip)) {
3565 error = xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK);
3566 if (error)
3567 return error;
3568 }
3569 xfs_inobp_check(mp, bp); 3570 xfs_inobp_check(mp, bp);
3570 3571
3571 /* 3572 /*
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
index 229cc6a6d8ef..ebfc13350f9a 100644
--- a/fs/xfs/xfs_iops.c
+++ b/fs/xfs/xfs_iops.c
@@ -516,6 +516,20 @@ xfs_vn_getattr(
516 stat->blocks = 516 stat->blocks =
517 XFS_FSB_TO_BB(mp, ip->i_d.di_nblocks + ip->i_delayed_blks); 517 XFS_FSB_TO_BB(mp, ip->i_d.di_nblocks + ip->i_delayed_blks);
518 518
519 if (ip->i_d.di_version == 3) {
520 if (request_mask & STATX_BTIME) {
521 stat->result_mask |= STATX_BTIME;
522 stat->btime.tv_sec = ip->i_d.di_crtime.t_sec;
523 stat->btime.tv_nsec = ip->i_d.di_crtime.t_nsec;
524 }
525 }
526
527 if (ip->i_d.di_flags & XFS_DIFLAG_IMMUTABLE)
528 stat->attributes |= STATX_ATTR_IMMUTABLE;
529 if (ip->i_d.di_flags & XFS_DIFLAG_APPEND)
530 stat->attributes |= STATX_ATTR_APPEND;
531 if (ip->i_d.di_flags & XFS_DIFLAG_NODUMP)
532 stat->attributes |= STATX_ATTR_NODUMP;
519 533
520 switch (inode->i_mode & S_IFMT) { 534 switch (inode->i_mode & S_IFMT) {
521 case S_IFBLK: 535 case S_IFBLK:
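
With this hunk, statx() on XFS reports the creation time of v3 inodes plus the immutable/append/nodump attribute bits. A runnable userspace check; it assumes a glibc new enough (roughly 2.28+) to expose the statx() wrapper:

#define _GNU_SOURCE
#include <stdio.h>
#include <fcntl.h>
#include <sys/stat.h>

int main(int argc, char **argv)
{
	struct statx stx;

	if (argc < 2)
		return 1;
	if (statx(AT_FDCWD, argv[1], 0, STATX_BTIME, &stx)) {
		perror("statx");
		return 1;
	}
	if (stx.stx_mask & STATX_BTIME)	/* set e.g. for v3 XFS inodes */
		printf("btime: %lld.%09u\n",
		       (long long)stx.stx_btime.tv_sec,
		       stx.stx_btime.tv_nsec);
	else
		puts("no birth time reported");
	return 0;
}
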
diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c
index 2a6d9b1558e0..26d67ce3c18d 100644
--- a/fs/xfs/xfs_itable.c
+++ b/fs/xfs/xfs_itable.c
@@ -583,7 +583,7 @@ xfs_inumbers(
583 return error; 583 return error;
584 584
585 bcount = MIN(left, (int)(PAGE_SIZE / sizeof(*buffer))); 585 bcount = MIN(left, (int)(PAGE_SIZE / sizeof(*buffer)));
586 buffer = kmem_alloc(bcount * sizeof(*buffer), KM_SLEEP); 586 buffer = kmem_zalloc(bcount * sizeof(*buffer), KM_SLEEP);
587 do { 587 do {
588 struct xfs_inobt_rec_incore r; 588 struct xfs_inobt_rec_incore r;
589 int stat; 589 int stat;
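
The kmem_alloc() to kmem_zalloc() switch matters because this buffer is later copied to userspace: entries the inumbers walk never fills would otherwise go out holding stale kernel heap contents. A toy userspace model of the fix, with calloc() standing in for kmem_zalloc() and a made-up record type:

#include <stdio.h>
#include <stdlib.h>

struct rec { long ino; long pad; };	/* toy stand-in for the records */

int main(void)
{
	size_t bcount = 8, filled = 3;	/* the walk may fill fewer than bcount */
	struct rec *buf = calloc(bcount, sizeof(*buf));
	size_t i;

	if (!buf)
		return 1;
	for (i = 0; i < filled; i++)
		buf[i].ino = 100 + i;
	/*
	 * The unfilled tail is all-zero rather than stale heap data, so
	 * copying the whole buffer out cannot leak earlier contents.
	 */
	for (i = 0; i < bcount; i++)
		printf("%zu: %ld\n", i, buf[i].ino);
	free(buf);
	return 0;
}
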
diff --git a/include/asm-generic/sections.h b/include/asm-generic/sections.h
index 4df64a1fc09e..532372c6cf15 100644
--- a/include/asm-generic/sections.h
+++ b/include/asm-generic/sections.h
@@ -14,8 +14,8 @@
14 * [_sdata, _edata]: contains .data.* sections, may also contain .rodata.* 14 * [_sdata, _edata]: contains .data.* sections, may also contain .rodata.*
15 * and/or .init.* sections. 15 * and/or .init.* sections.
16 * [__start_rodata, __end_rodata]: contains .rodata.* sections 16 * [__start_rodata, __end_rodata]: contains .rodata.* sections
17 * [__start_data_ro_after_init, __end_data_ro_after_init]: 17 * [__start_ro_after_init, __end_ro_after_init]:
18 * contains data.ro_after_init section 18 * contains .data..ro_after_init section
19 * [__init_begin, __init_end]: contains .init.* sections, but .init.text.* 19 * [__init_begin, __init_end]: contains .init.* sections, but .init.text.*
20 * may be out of this range on some architectures. 20 * may be out of this range on some architectures.
21 * [_sinittext, _einittext]: contains .init.text.* sections 21 * [_sinittext, _einittext]: contains .init.text.* sections
@@ -33,7 +33,7 @@ extern char _data[], _sdata[], _edata[];
33extern char __bss_start[], __bss_stop[]; 33extern char __bss_start[], __bss_stop[];
34extern char __init_begin[], __init_end[]; 34extern char __init_begin[], __init_end[];
35extern char _sinittext[], _einittext[]; 35extern char _sinittext[], _einittext[];
36extern char __start_data_ro_after_init[], __end_data_ro_after_init[]; 36extern char __start_ro_after_init[], __end_ro_after_init[];
37extern char _end[]; 37extern char _end[];
38extern char __per_cpu_load[], __per_cpu_start[], __per_cpu_end[]; 38extern char __per_cpu_load[], __per_cpu_start[], __per_cpu_end[];
39extern char __kprobes_text_start[], __kprobes_text_end[]; 39extern char __kprobes_text_start[], __kprobes_text_end[];
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 0968d13b3885..143db9c523e2 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -173,6 +173,7 @@
173 KEEP(*(__##name##_of_table_end)) 173 KEEP(*(__##name##_of_table_end))
174 174
175#define CLKSRC_OF_TABLES() OF_TABLE(CONFIG_CLKSRC_OF, clksrc) 175#define CLKSRC_OF_TABLES() OF_TABLE(CONFIG_CLKSRC_OF, clksrc)
176#define CLKEVT_OF_TABLES() OF_TABLE(CONFIG_CLKEVT_OF, clkevt)
176#define IRQCHIP_OF_MATCH_TABLE() OF_TABLE(CONFIG_IRQCHIP, irqchip) 177#define IRQCHIP_OF_MATCH_TABLE() OF_TABLE(CONFIG_IRQCHIP, irqchip)
177#define CLK_OF_TABLES() OF_TABLE(CONFIG_COMMON_CLK, clk) 178#define CLK_OF_TABLES() OF_TABLE(CONFIG_COMMON_CLK, clk)
178#define IOMMU_OF_TABLES() OF_TABLE(CONFIG_OF_IOMMU, iommu) 179#define IOMMU_OF_TABLES() OF_TABLE(CONFIG_OF_IOMMU, iommu)
@@ -260,9 +261,9 @@
260 */ 261 */
261#ifndef RO_AFTER_INIT_DATA 262#ifndef RO_AFTER_INIT_DATA
262#define RO_AFTER_INIT_DATA \ 263#define RO_AFTER_INIT_DATA \
263 __start_data_ro_after_init = .; \ 264 VMLINUX_SYMBOL(__start_ro_after_init) = .; \
264 *(.data..ro_after_init) \ 265 *(.data..ro_after_init) \
265 __end_data_ro_after_init = .; 266 VMLINUX_SYMBOL(__end_ro_after_init) = .;
266#endif 267#endif
267 268
268/* 269/*
@@ -559,6 +560,7 @@
559 CLK_OF_TABLES() \ 560 CLK_OF_TABLES() \
560 RESERVEDMEM_OF_TABLES() \ 561 RESERVEDMEM_OF_TABLES() \
561 CLKSRC_OF_TABLES() \ 562 CLKSRC_OF_TABLES() \
563 CLKEVT_OF_TABLES() \
562 IOMMU_OF_TABLES() \ 564 IOMMU_OF_TABLES() \
563 CPU_METHOD_OF_TABLES() \ 565 CPU_METHOD_OF_TABLES() \
564 CPUIDLE_METHOD_OF_TABLES() \ 566 CPUIDLE_METHOD_OF_TABLES() \
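
The renamed __start_ro_after_init/__end_ro_after_init symbols bound the .data..ro_after_init section: variables that are written during boot and then remapped read-only before init runs. A minimal sketch of typical in-kernel usage (the variable and initcall are made up):

#include <linux/cache.h>	/* __ro_after_init */
#include <linux/init.h>

static unsigned long feature_mask __ro_after_init;

static int __init feature_setup(void)
{
	/* Last legal write: this runs before mark_rodata_ro(). */
	feature_mask = 0x3;
	return 0;
}
core_initcall(feature_setup);
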
diff --git a/include/drm/bridge/dw_hdmi.h b/include/drm/bridge/dw_hdmi.h
index bcceee8114a4..ed599bea3f6c 100644
--- a/include/drm/bridge/dw_hdmi.h
+++ b/include/drm/bridge/dw_hdmi.h
@@ -14,6 +14,67 @@
14 14
15struct dw_hdmi; 15struct dw_hdmi;
16 16
17/**
18 * DOC: Supported input formats and encodings
19 *
 20 * Depending on the hardware configuration of the controller IP, it supports
 21 * a subset of the following input formats and encodings on its internal
 22 * 48-bit bus.
23 *
24 * +----------------------+----------------------------------+------------------------------+
25 * + Format Name + Format Code + Encodings +
26 * +----------------------+----------------------------------+------------------------------+
27 * + RGB 4:4:4 8bit + ``MEDIA_BUS_FMT_RGB888_1X24`` + ``V4L2_YCBCR_ENC_DEFAULT`` +
28 * +----------------------+----------------------------------+------------------------------+
29 * + RGB 4:4:4 10bits + ``MEDIA_BUS_FMT_RGB101010_1X30`` + ``V4L2_YCBCR_ENC_DEFAULT`` +
30 * +----------------------+----------------------------------+------------------------------+
31 * + RGB 4:4:4 12bits + ``MEDIA_BUS_FMT_RGB121212_1X36`` + ``V4L2_YCBCR_ENC_DEFAULT`` +
32 * +----------------------+----------------------------------+------------------------------+
33 * + RGB 4:4:4 16bits + ``MEDIA_BUS_FMT_RGB161616_1X48`` + ``V4L2_YCBCR_ENC_DEFAULT`` +
34 * +----------------------+----------------------------------+------------------------------+
35 * + YCbCr 4:4:4 8bit + ``MEDIA_BUS_FMT_YUV8_1X24`` + ``V4L2_YCBCR_ENC_601`` +
36 * + + + or ``V4L2_YCBCR_ENC_709`` +
37 * + + + or ``V4L2_YCBCR_ENC_XV601`` +
38 * + + + or ``V4L2_YCBCR_ENC_XV709`` +
39 * +----------------------+----------------------------------+------------------------------+
40 * + YCbCr 4:4:4 10bits + ``MEDIA_BUS_FMT_YUV10_1X30`` + ``V4L2_YCBCR_ENC_601`` +
41 * + + + or ``V4L2_YCBCR_ENC_709`` +
42 * + + + or ``V4L2_YCBCR_ENC_XV601`` +
43 * + + + or ``V4L2_YCBCR_ENC_XV709`` +
44 * +----------------------+----------------------------------+------------------------------+
45 * + YCbCr 4:4:4 12bits + ``MEDIA_BUS_FMT_YUV12_1X36`` + ``V4L2_YCBCR_ENC_601`` +
46 * + + + or ``V4L2_YCBCR_ENC_709`` +
47 * + + + or ``V4L2_YCBCR_ENC_XV601`` +
48 * + + + or ``V4L2_YCBCR_ENC_XV709`` +
49 * +----------------------+----------------------------------+------------------------------+
50 * + YCbCr 4:4:4 16bits + ``MEDIA_BUS_FMT_YUV16_1X48`` + ``V4L2_YCBCR_ENC_601`` +
51 * + + + or ``V4L2_YCBCR_ENC_709`` +
52 * + + + or ``V4L2_YCBCR_ENC_XV601`` +
53 * + + + or ``V4L2_YCBCR_ENC_XV709`` +
54 * +----------------------+----------------------------------+------------------------------+
55 * + YCbCr 4:2:2 8bit + ``MEDIA_BUS_FMT_UYVY8_1X16`` + ``V4L2_YCBCR_ENC_601`` +
56 * + + + or ``V4L2_YCBCR_ENC_709`` +
57 * +----------------------+----------------------------------+------------------------------+
58 * + YCbCr 4:2:2 10bits + ``MEDIA_BUS_FMT_UYVY10_1X20`` + ``V4L2_YCBCR_ENC_601`` +
59 * + + + or ``V4L2_YCBCR_ENC_709`` +
60 * +----------------------+----------------------------------+------------------------------+
61 * + YCbCr 4:2:2 12bits + ``MEDIA_BUS_FMT_UYVY12_1X24`` + ``V4L2_YCBCR_ENC_601`` +
62 * + + + or ``V4L2_YCBCR_ENC_709`` +
63 * +----------------------+----------------------------------+------------------------------+
64 * + YCbCr 4:2:0 8bit + ``MEDIA_BUS_FMT_UYYVYY8_0_5X24`` + ``V4L2_YCBCR_ENC_601`` +
65 * + + + or ``V4L2_YCBCR_ENC_709`` +
66 * +----------------------+----------------------------------+------------------------------+
67 * + YCbCr 4:2:0 10bits + ``MEDIA_BUS_FMT_UYYVYY10_0_5X30``+ ``V4L2_YCBCR_ENC_601`` +
68 * + + + or ``V4L2_YCBCR_ENC_709`` +
69 * +----------------------+----------------------------------+------------------------------+
70 * + YCbCr 4:2:0 12bits + ``MEDIA_BUS_FMT_UYYVYY12_0_5X36``+ ``V4L2_YCBCR_ENC_601`` +
71 * + + + or ``V4L2_YCBCR_ENC_709`` +
72 * +----------------------+----------------------------------+------------------------------+
73 * + YCbCr 4:2:0 16bits + ``MEDIA_BUS_FMT_UYYVYY16_0_5X48``+ ``V4L2_YCBCR_ENC_601`` +
74 * + + + or ``V4L2_YCBCR_ENC_709`` +
75 * +----------------------+----------------------------------+------------------------------+
76 */
77
17enum { 78enum {
18 DW_HDMI_RES_8, 79 DW_HDMI_RES_8,
19 DW_HDMI_RES_10, 80 DW_HDMI_RES_10,
@@ -56,12 +117,17 @@ struct dw_hdmi_phy_ops {
56 struct drm_display_mode *mode); 117 struct drm_display_mode *mode);
57 void (*disable)(struct dw_hdmi *hdmi, void *data); 118 void (*disable)(struct dw_hdmi *hdmi, void *data);
58 enum drm_connector_status (*read_hpd)(struct dw_hdmi *hdmi, void *data); 119 enum drm_connector_status (*read_hpd)(struct dw_hdmi *hdmi, void *data);
120 void (*update_hpd)(struct dw_hdmi *hdmi, void *data,
121 bool force, bool disabled, bool rxsense);
122 void (*setup_hpd)(struct dw_hdmi *hdmi, void *data);
59}; 123};
60 124
61struct dw_hdmi_plat_data { 125struct dw_hdmi_plat_data {
62 struct regmap *regm; 126 struct regmap *regm;
63 enum drm_mode_status (*mode_valid)(struct drm_connector *connector, 127 enum drm_mode_status (*mode_valid)(struct drm_connector *connector,
64 struct drm_display_mode *mode); 128 struct drm_display_mode *mode);
129 unsigned long input_bus_format;
130 unsigned long input_bus_encoding;
65 131
66 /* Vendor PHY support */ 132 /* Vendor PHY support */
67 const struct dw_hdmi_phy_ops *phy_ops; 133 const struct dw_hdmi_phy_ops *phy_ops;
@@ -84,6 +150,8 @@ void dw_hdmi_unbind(struct device *dev);
84int dw_hdmi_bind(struct platform_device *pdev, struct drm_encoder *encoder, 150int dw_hdmi_bind(struct platform_device *pdev, struct drm_encoder *encoder,
85 const struct dw_hdmi_plat_data *plat_data); 151 const struct dw_hdmi_plat_data *plat_data);
86 152
153void dw_hdmi_setup_rx_sense(struct device *dev, bool hpd, bool rx_sense);
154
87void dw_hdmi_set_sample_rate(struct dw_hdmi *hdmi, unsigned int rate); 155void dw_hdmi_set_sample_rate(struct dw_hdmi *hdmi, unsigned int rate);
88void dw_hdmi_audio_enable(struct dw_hdmi *hdmi); 156void dw_hdmi_audio_enable(struct dw_hdmi *hdmi);
89void dw_hdmi_audio_disable(struct dw_hdmi *hdmi); 157void dw_hdmi_audio_disable(struct dw_hdmi *hdmi);
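
The new input_bus_format/input_bus_encoding fields let SoC glue drivers declare which entry of the table above their display pipeline feeds into the controller. A hedged sketch of a platform-data initializer; the driver name is hypothetical, the constants come straight from the table:

#include <drm/bridge/dw_hdmi.h>
#include <linux/media-bus-format.h>
#include <linux/videodev2.h>

/* Hypothetical glue driver: 8-bit YCbCr 4:4:4 in BT.709 on the input bus. */
static const struct dw_hdmi_plat_data example_dw_hdmi_drv_data = {
	.input_bus_format   = MEDIA_BUS_FMT_YUV8_1X24,
	.input_bus_encoding = V4L2_YCBCR_ENC_709,
	/* .mode_valid, .phy_ops, .phy_data etc. filled in by the glue code */
};
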
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 3bfafcdb8710..e1daa4f343cd 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -80,6 +80,7 @@
80#include <drm/drm_file.h> 80#include <drm/drm_file.h>
81#include <drm/drm_debugfs.h> 81#include <drm/drm_debugfs.h>
82#include <drm/drm_ioctl.h> 82#include <drm/drm_ioctl.h>
83#include <drm/drm_sysfs.h>
83 84
84struct module; 85struct module;
85 86
@@ -512,10 +513,6 @@ static inline int drm_device_is_unplugged(struct drm_device *dev)
512 * DMA quiscent + idle. DMA quiescent usually requires the hardware lock. 513 * DMA quiscent + idle. DMA quiescent usually requires the hardware lock.
513 */ 514 */
514 515
515 /* sysfs support (drm_sysfs.c) */
516extern void drm_sysfs_hotplug_event(struct drm_device *dev);
517
518
519/*@}*/ 516/*@}*/
520 517
521/* returns true if currently okay to sleep */ 518/* returns true if currently okay to sleep */
diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h
index fd33ed5eaeb4..788daf756f48 100644
--- a/include/drm/drm_atomic.h
+++ b/include/drm/drm_atomic.h
@@ -160,7 +160,6 @@ struct __drm_connnectors_state {
160 * @dev: parent DRM device 160 * @dev: parent DRM device
161 * @allow_modeset: allow full modeset 161 * @allow_modeset: allow full modeset
162 * @legacy_cursor_update: hint to enforce legacy cursor IOCTL semantics 162 * @legacy_cursor_update: hint to enforce legacy cursor IOCTL semantics
163 * @legacy_set_config: Disable conflicting encoders instead of failing with -EINVAL.
164 * @planes: pointer to array of structures with per-plane data 163 * @planes: pointer to array of structures with per-plane data
165 * @crtcs: pointer to array of CRTC pointers 164 * @crtcs: pointer to array of CRTC pointers
166 * @num_connector: size of the @connectors and @connector_states arrays 165 * @num_connector: size of the @connectors and @connector_states arrays
@@ -173,7 +172,6 @@ struct drm_atomic_state {
173 struct drm_device *dev; 172 struct drm_device *dev;
174 bool allow_modeset : 1; 173 bool allow_modeset : 1;
175 bool legacy_cursor_update : 1; 174 bool legacy_cursor_update : 1;
176 bool legacy_set_config : 1;
177 struct __drm_planes_state *planes; 175 struct __drm_planes_state *planes;
178 struct __drm_crtcs_state *crtcs; 176 struct __drm_crtcs_state *crtcs;
179 int num_connector; 177 int num_connector;
diff --git a/include/drm/drm_atomic_helper.h b/include/drm/drm_atomic_helper.h
index fd395dc050ee..f0a8678ae98e 100644
--- a/include/drm/drm_atomic_helper.h
+++ b/include/drm/drm_atomic_helper.h
@@ -176,7 +176,8 @@ void drm_atomic_helper_connector_destroy_state(struct drm_connector *connector,
176 struct drm_connector_state *state); 176 struct drm_connector_state *state);
177int drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc, 177int drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
178 u16 *red, u16 *green, u16 *blue, 178 u16 *red, u16 *green, u16 *blue,
179 uint32_t size); 179 uint32_t size,
180 struct drm_modeset_acquire_ctx *ctx);
180 181
181/** 182/**
182 * drm_atomic_crtc_for_each_plane - iterate over planes currently attached to CRTC 183 * drm_atomic_crtc_for_each_plane - iterate over planes currently attached to CRTC
diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h
index 8250062eb8a5..4eeda120e46d 100644
--- a/include/drm/drm_connector.h
+++ b/include/drm/drm_connector.h
@@ -32,6 +32,7 @@
32struct drm_device; 32struct drm_device;
33 33
34struct drm_connector_helper_funcs; 34struct drm_connector_helper_funcs;
35struct drm_modeset_acquire_ctx;
35struct drm_device; 36struct drm_device;
36struct drm_crtc; 37struct drm_crtc;
37struct drm_encoder; 38struct drm_encoder;
@@ -382,6 +383,11 @@ struct drm_connector_funcs {
382 * the helper library vtable purely for historical reasons. The only DRM 383 * the helper library vtable purely for historical reasons. The only DRM
383 * core entry point to probe connector state is @fill_modes. 384 * core entry point to probe connector state is @fill_modes.
384 * 385 *
386 * Note that the helper library will already hold
387 * &drm_mode_config.connection_mutex. Drivers which need to grab additional
388 * locks to avoid races with concurrent modeset changes need to use
389 * &drm_connector_helper_funcs.detect_ctx instead.
390 *
385 * RETURNS: 391 * RETURNS:
386 * 392 *
387 * drm_connector_status indicating the connector's status. 393 * drm_connector_status indicating the connector's status.
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index 2be2192b1373..a8176a836e25 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -322,7 +322,8 @@ struct drm_crtc_funcs {
322 * hooks. 322 * hooks.
323 */ 323 */
324 int (*gamma_set)(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, 324 int (*gamma_set)(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
325 uint32_t size); 325 uint32_t size,
326 struct drm_modeset_acquire_ctx *ctx);
326 327
327 /** 328 /**
328 * @destroy: 329 * @destroy:
@@ -782,15 +783,6 @@ struct drm_crtc {
782 */ 783 */
783 spinlock_t commit_lock; 784 spinlock_t commit_lock;
784 785
785 /**
786 * @acquire_ctx:
787 *
788 * Per-CRTC implicit acquire context used by atomic drivers for legacy
789 * IOCTLs, so that atomic drivers can get at the locking acquire
790 * context.
791 */
792 struct drm_modeset_acquire_ctx *acquire_ctx;
793
794#ifdef CONFIG_DEBUG_FS 786#ifdef CONFIG_DEBUG_FS
795 /** 787 /**
796 * @debugfs_entry: 788 * @debugfs_entry:
diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
index 43505c7b2b3f..76e237bd989b 100644
--- a/include/drm/drm_crtc_helper.h
+++ b/include/drm/drm_crtc_helper.h
@@ -67,6 +67,9 @@ int drm_helper_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
67int drm_helper_probe_single_connector_modes(struct drm_connector 67int drm_helper_probe_single_connector_modes(struct drm_connector
68 *connector, uint32_t maxX, 68 *connector, uint32_t maxX,
69 uint32_t maxY); 69 uint32_t maxY);
70int drm_helper_probe_detect(struct drm_connector *connector,
71 struct drm_modeset_acquire_ctx *ctx,
72 bool force);
70void drm_kms_helper_poll_init(struct drm_device *dev); 73void drm_kms_helper_poll_init(struct drm_device *dev);
71void drm_kms_helper_poll_fini(struct drm_device *dev); 74void drm_kms_helper_poll_fini(struct drm_device *dev);
72bool drm_helper_hpd_irq_event(struct drm_device *dev); 75bool drm_helper_hpd_irq_event(struct drm_device *dev);
diff --git a/include/drm/drm_ioctl.h b/include/drm/drm_ioctl.h
index f17ee077f649..ee03b3c44b3b 100644
--- a/include/drm/drm_ioctl.h
+++ b/include/drm/drm_ioctl.h
@@ -33,6 +33,7 @@
33#define _DRM_IOCTL_H_ 33#define _DRM_IOCTL_H_
34 34
35#include <linux/types.h> 35#include <linux/types.h>
36#include <linux/bitops.h>
36 37
37#include <asm/ioctl.h> 38#include <asm/ioctl.h>
38 39
@@ -41,41 +42,126 @@ struct drm_file;
41struct file; 42struct file;
42 43
43/** 44/**
44 * Ioctl function type. 45 * drm_ioctl_t - DRM ioctl function type.
 46 * @dev: DRM device
47 * @data: private pointer of the ioctl call
48 * @file_priv: DRM file this ioctl was made on
45 * 49 *
 46 * \param inode device inode. 50 * This is the DRM ioctl typedef. Note that drm_ioctl() has already copied @data
47 * \param file_priv DRM file private pointer. 51 * into kernel-space, and will also copy it back, depending upon the read/write
48 * \param cmd command. 52 * settings in the ioctl command code.
49 * \param arg argument.
50 */ 53 */
51typedef int drm_ioctl_t(struct drm_device *dev, void *data, 54typedef int drm_ioctl_t(struct drm_device *dev, void *data,
52 struct drm_file *file_priv); 55 struct drm_file *file_priv);
53 56
57/**
58 * drm_ioctl_compat_t - compatibility DRM ioctl function type.
59 * @filp: file pointer
60 * @cmd: ioctl command code
 61 * @arg: ioctl argument
62 *
63 * Just a typedef to make declaring an array of compatibility handlers easier.
64 * New drivers shouldn't screw up the structure layout for their ioctl
65 * structures and hence never need this.
66 */
54typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd, 67typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
55 unsigned long arg); 68 unsigned long arg);
56 69
57#define DRM_IOCTL_NR(n) _IOC_NR(n) 70#define DRM_IOCTL_NR(n) _IOC_NR(n)
58#define DRM_MAJOR 226 71#define DRM_MAJOR 226
59 72
60#define DRM_AUTH 0x1 73/**
61#define DRM_MASTER 0x2 74 * enum drm_ioctl_flags - DRM ioctl flags
62#define DRM_ROOT_ONLY 0x4 75 *
63#define DRM_CONTROL_ALLOW 0x8 76 * Various flags that can be set in &drm_ioctl_desc.flags to control how
64#define DRM_UNLOCKED 0x10 77 * userspace can use a given ioctl.
65#define DRM_RENDER_ALLOW 0x20 78 */
79enum drm_ioctl_flags {
80 /**
81 * @DRM_AUTH:
82 *
 83 * This is for ioctls which are used for rendering, and require that the
84 * file descriptor is either for a render node, or if it's a
85 * legacy/primary node, then it must be authenticated.
86 */
87 DRM_AUTH = BIT(0),
88 /**
89 * @DRM_MASTER:
90 *
91 * This must be set for any ioctl which can change the modeset or
92 * display state. Userspace must call the ioctl through a primary node,
93 * while it is the active master.
94 *
 95 * Note that read-only modeset ioctls can also be called by
96 * unauthenticated clients, or when a master is not the currently active
97 * one.
98 */
99 DRM_MASTER = BIT(1),
100 /**
101 * @DRM_ROOT_ONLY:
102 *
 103 * Anything that could potentially wreck a master file descriptor needs
 104 * to have this flag set. Currently that's only the SETMASTER and
105 * DROPMASTER ioctl, which e.g. logind can call to force a non-behaving
106 * master (display compositor) into compliance.
107 *
108 * This is equivalent to callers with the SYSADMIN capability.
109 */
110 DRM_ROOT_ONLY = BIT(2),
111 /**
112 * @DRM_CONTROL_ALLOW:
113 *
114 * Deprecated, do not use. Control nodes are in the process of getting
115 * removed.
116 */
117 DRM_CONTROL_ALLOW = BIT(3),
118 /**
119 * @DRM_UNLOCKED:
120 *
121 * Whether &drm_ioctl_desc.func should be called with the DRM BKL held
122 * or not. Enforced as the default for all modern drivers, hence there
123 * should never be a need to set this flag.
124 */
125 DRM_UNLOCKED = BIT(4),
126 /**
127 * @DRM_RENDER_ALLOW:
128 *
 129 * This is used for all ioctls needed only for rendering, for drivers
 130 * which support render nodes. This should be all new render drivers,
 131 * and hence it should always be set for any ioctl with DRM_AUTH set.
 132 * Note though that read-only query ioctls might have this set without
 133 * also setting DRM_AUTH, because they do not require authentication.
134 */
135 DRM_RENDER_ALLOW = BIT(5),
136};
66 137
138/**
139 * struct drm_ioctl_desc - DRM driver ioctl entry
140 * @cmd: ioctl command number, without flags
141 * @flags: a bitmask of &enum drm_ioctl_flags
142 * @func: handler for this ioctl
143 * @name: user-readable name for debug output
144 *
145 * For convenience it's easier to create these using the DRM_IOCTL_DEF_DRV()
146 * macro.
147 */
67struct drm_ioctl_desc { 148struct drm_ioctl_desc {
68 unsigned int cmd; 149 unsigned int cmd;
69 int flags; 150 enum drm_ioctl_flags flags;
70 drm_ioctl_t *func; 151 drm_ioctl_t *func;
71 const char *name; 152 const char *name;
72}; 153};
73 154
74/** 155/**
75 * Creates a driver or general drm_ioctl_desc array entry for the given 156 * DRM_IOCTL_DEF_DRV() - helper macro to fill out a &struct drm_ioctl_desc
76 * ioctl, for use by drm_ioctl(). 157 * @ioctl: ioctl command suffix
158 * @_func: handler for the ioctl
159 * @_flags: a bitmask of &enum drm_ioctl_flags
160 *
161 * Small helper macro to create a &struct drm_ioctl_desc entry. The ioctl
162 * command number is constructed by prepending ``DRM_IOCTL\_`` and passing that
163 * to DRM_IOCTL_NR().
77 */ 164 */
78
79#define DRM_IOCTL_DEF_DRV(ioctl, _func, _flags) \ 165#define DRM_IOCTL_DEF_DRV(ioctl, _func, _flags) \
80 [DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = { \ 166 [DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = { \
81 .cmd = DRM_IOCTL_##ioctl, \ 167 .cmd = DRM_IOCTL_##ioctl, \
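
A sketch of the macro and flags in use. The EXAMPLE_* ioctl names and handlers are hypothetical; the macro assumes matching DRM_IOCTL_EXAMPLE_* command numbers are defined in the driver's uapi header:

/* Hypothetical driver ioctl table using the flags documented above. */
static const struct drm_ioctl_desc example_ioctls[] = {
	DRM_IOCTL_DEF_DRV(EXAMPLE_GEM_CREATE, example_gem_create_ioctl,
			  DRM_AUTH | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(EXAMPLE_SET_PARAM, example_set_param_ioctl,
			  DRM_MASTER),
};
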
diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h
index 091c42205667..c01c328f6cc8 100644
--- a/include/drm/drm_modeset_helper_vtables.h
+++ b/include/drm/drm_modeset_helper_vtables.h
@@ -747,6 +747,10 @@ struct drm_connector_helper_funcs {
747 * This callback is used by the probe helpers in e.g. 747 * This callback is used by the probe helpers in e.g.
748 * drm_helper_probe_single_connector_modes(). 748 * drm_helper_probe_single_connector_modes().
749 * 749 *
750 * To avoid races with concurrent connector state updates, the helper
751 * libraries always call this with the &drm_mode_config.connection_mutex
752 * held. Because of this it's safe to inspect &drm_connector->state.
753 *
750 * RETURNS: 754 * RETURNS:
751 * 755 *
752 * The number of modes added by calling drm_mode_probed_add(). 756 * The number of modes added by calling drm_mode_probed_add().
@@ -754,6 +758,34 @@ struct drm_connector_helper_funcs {
754 int (*get_modes)(struct drm_connector *connector); 758 int (*get_modes)(struct drm_connector *connector);
755 759
756 /** 760 /**
761 * @detect_ctx:
762 *
763 * Check to see if anything is attached to the connector. The parameter
764 * force is set to false whilst polling, true when checking the
765 * connector due to a user request. force can be used by the driver to
766 * avoid expensive, destructive operations during automated probing.
767 *
768 * This callback is optional, if not implemented the connector will be
769 * considered as always being attached.
770 *
771 * This is the atomic version of &drm_connector_funcs.detect.
772 *
773 * To avoid races against concurrent connector state updates, the
774 * helper libraries always call this with ctx set to a valid context,
775 * and &drm_mode_config.connection_mutex will always be locked with
776 * the ctx parameter set to this ctx. This allows taking additional
777 * locks as required.
778 *
779 * RETURNS:
780 *
781 * &drm_connector_status indicating the connector's status,
782 * or the error code returned by drm_modeset_lock(), -EDEADLK.
783 */
784 int (*detect_ctx)(struct drm_connector *connector,
785 struct drm_modeset_acquire_ctx *ctx,
786 bool force);
787
788 /**
757 * @mode_valid: 789 * @mode_valid:
758 * 790 *
759 * Callback to validate a mode for a connector, irrespective of the 791 * Callback to validate a mode for a connector, irrespective of the
@@ -771,6 +803,10 @@ struct drm_connector_helper_funcs {
771 * CRTC helpers will not call this function. Drivers therefore must 803 * CRTC helpers will not call this function. Drivers therefore must
772 * still fully validate any mode passed in in a modeset request. 804 * still fully validate any mode passed in in a modeset request.
773 * 805 *
806 * To avoid races with concurrent connector state updates, the helper
807 * libraries always call this with the &drm_mode_config.connection_mutex
808 * held. Because of this it's safe to inspect &drm_connector->state.
809 *
774 * RETURNS: 810 * RETURNS:
775 * 811 *
776 * Either &drm_mode_status.MODE_OK or one of the failure reasons in &enum 812 * Either &drm_mode_status.MODE_OK or one of the failure reasons in &enum
@@ -836,6 +872,40 @@ struct drm_connector_helper_funcs {
836 */ 872 */
837 struct drm_encoder *(*atomic_best_encoder)(struct drm_connector *connector, 873 struct drm_encoder *(*atomic_best_encoder)(struct drm_connector *connector,
838 struct drm_connector_state *connector_state); 874 struct drm_connector_state *connector_state);
875
876 /**
877 * @atomic_check:
878 *
879 * This hook is used to validate connector state. This function is
880 * called from &drm_atomic_helper_check_modeset, and is called when
881 * a connector property is set, or a modeset on the crtc is forced.
882 *
883 * Because &drm_atomic_helper_check_modeset may be called multiple times,
884 * this function should handle being called multiple times as well.
885 *
886 * This function is also allowed to inspect any other object's state and
887 * can add more state objects to the atomic commit if needed. Care must
888 * be taken though to ensure that state check and compute functions for
889 * these added states are all called, and derived state in other objects
890 * all updated. Again the recommendation is to just call check helpers
891 * until a maximal configuration is reached.
892 *
893 * NOTE:
894 *
895 * This function is called in the check phase of an atomic update. The
896 * driver is not allowed to change anything outside of the free-standing
897 * state objects passed-in or assembled in the overall &drm_atomic_state
898 * update tracking structure.
899 *
900 * RETURNS:
901 *
902 * 0 on success, -EINVAL if the state or the transition can't be
903 * supported, -ENOMEM on memory allocation failure and -EDEADLK if an
904 * attempt to obtain another state object ran into a &drm_modeset_lock
905 * deadlock.
906 */
907 int (*atomic_check)(struct drm_connector *connector,
908 struct drm_connector_state *state);
839}; 909};
840 910
841/** 911/**
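
A sketch of a driver implementing the new @detect_ctx hook. All example_* names and the extra hw_lock (assumed to be a struct drm_modeset_lock) are hypothetical; the point is that additional locks go through ctx so the helpers can back off on -EDEADLK:

static int example_connector_detect_ctx(struct drm_connector *connector,
					struct drm_modeset_acquire_ctx *ctx,
					bool force)
{
	struct example_device *edev = connector_to_example(connector);
	int ret;

	/* Extra driver lock acquired through ctx, enabling backoff. */
	ret = drm_modeset_lock(&edev->hw_lock, ctx);
	if (ret)
		return ret;	/* typically -EDEADLK */

	if (!force && !example_hpd_asserted(edev))
		return connector_status_disconnected;

	return example_ddc_probe(edev) ? connector_status_connected :
					 connector_status_disconnected;
}

static const struct drm_connector_helper_funcs example_helper_funcs = {
	.detect_ctx = example_connector_detect_ctx,
};
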
diff --git a/include/drm/drm_modeset_lock.h b/include/drm/drm_modeset_lock.h
index 96d39fbd12ca..4b27c2bb955c 100644
--- a/include/drm/drm_modeset_lock.h
+++ b/include/drm/drm_modeset_lock.h
@@ -121,12 +121,7 @@ struct drm_plane;
121 121
122void drm_modeset_lock_all(struct drm_device *dev); 122void drm_modeset_lock_all(struct drm_device *dev);
123void drm_modeset_unlock_all(struct drm_device *dev); 123void drm_modeset_unlock_all(struct drm_device *dev);
124void drm_modeset_lock_crtc(struct drm_crtc *crtc,
125 struct drm_plane *plane);
126void drm_modeset_unlock_crtc(struct drm_crtc *crtc);
127void drm_warn_on_modeset_not_all_locked(struct drm_device *dev); 124void drm_warn_on_modeset_not_all_locked(struct drm_device *dev);
128struct drm_modeset_acquire_ctx *
129drm_modeset_legacy_acquire_ctx(struct drm_crtc *crtc);
130 125
131int drm_modeset_lock_all_ctx(struct drm_device *dev, 126int drm_modeset_lock_all_ctx(struct drm_device *dev,
132 struct drm_modeset_acquire_ctx *ctx); 127 struct drm_modeset_acquire_ctx *ctx);
diff --git a/include/drm/drm_of.h b/include/drm/drm_of.h
index d1fc563f068a..104dd517fdbe 100644
--- a/include/drm/drm_of.h
+++ b/include/drm/drm_of.h
@@ -8,6 +8,8 @@ struct component_match;
8struct device; 8struct device;
9struct drm_device; 9struct drm_device;
10struct drm_encoder; 10struct drm_encoder;
11struct drm_panel;
12struct drm_bridge;
11struct device_node; 13struct device_node;
12 14
13#ifdef CONFIG_OF 15#ifdef CONFIG_OF
@@ -23,6 +25,10 @@ int drm_of_component_probe(struct device *dev,
23int drm_of_encoder_active_endpoint(struct device_node *node, 25int drm_of_encoder_active_endpoint(struct device_node *node,
24 struct drm_encoder *encoder, 26 struct drm_encoder *encoder,
25 struct of_endpoint *endpoint); 27 struct of_endpoint *endpoint);
28int drm_of_find_panel_or_bridge(const struct device_node *np,
29 int port, int endpoint,
30 struct drm_panel **panel,
31 struct drm_bridge **bridge);
26#else 32#else
27static inline uint32_t drm_of_find_possible_crtcs(struct drm_device *dev, 33static inline uint32_t drm_of_find_possible_crtcs(struct drm_device *dev,
28 struct device_node *port) 34 struct device_node *port)
@@ -52,6 +58,13 @@ static inline int drm_of_encoder_active_endpoint(struct device_node *node,
52{ 58{
53 return -EINVAL; 59 return -EINVAL;
54} 60}
61static inline int drm_of_find_panel_or_bridge(const struct device_node *np,
62 int port, int endpoint,
63 struct drm_panel **panel,
64 struct drm_bridge **bridge)
65{
66 return -EINVAL;
67}
55#endif 68#endif
56 69
57static inline int drm_of_encoder_active_endpoint_id(struct device_node *node, 70static inline int drm_of_encoder_active_endpoint_id(struct device_node *node,
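
Typical use of the new helper during encoder probe: look downstream of the encoder's OF graph port and receive either a panel or a bridge, exactly one of which is set on success. The port/endpoint numbers and names here are hypothetical; the helper can return -EPROBE_DEFER, which probe code should usually propagate:

#include <drm/drm_of.h>

static int example_encoder_parse_dt(struct device *dev,
				    struct drm_panel **panel,
				    struct drm_bridge **bridge)
{
	/* Port 1, endpoint 0: whatever sits downstream of the encoder. */
	return drm_of_find_panel_or_bridge(dev->of_node, 1, 0, panel, bridge);
}
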
diff --git a/include/drm/drm_panel.h b/include/drm/drm_panel.h
index 4b76cf2d5a7b..1b364b0100f4 100644
--- a/include/drm/drm_panel.h
+++ b/include/drm/drm_panel.h
@@ -192,7 +192,7 @@ void drm_panel_remove(struct drm_panel *panel);
192int drm_panel_attach(struct drm_panel *panel, struct drm_connector *connector); 192int drm_panel_attach(struct drm_panel *panel, struct drm_connector *connector);
193int drm_panel_detach(struct drm_panel *panel); 193int drm_panel_detach(struct drm_panel *panel);
194 194
195#ifdef CONFIG_OF 195#if defined(CONFIG_OF) && defined(CONFIG_DRM_PANEL)
196struct drm_panel *of_drm_find_panel(const struct device_node *np); 196struct drm_panel *of_drm_find_panel(const struct device_node *np);
197#else 197#else
198static inline struct drm_panel *of_drm_find_panel(const struct device_node *np) 198static inline struct drm_panel *of_drm_find_panel(const struct device_node *np)
diff --git a/include/drm/drm_sysfs.h b/include/drm/drm_sysfs.h
index 23418c1f10d1..70c9a1074aca 100644
--- a/include/drm/drm_sysfs.h
+++ b/include/drm/drm_sysfs.h
@@ -1,12 +1,12 @@
1#ifndef _DRM_SYSFS_H_ 1#ifndef _DRM_SYSFS_H_
2#define _DRM_SYSFS_H_ 2#define _DRM_SYSFS_H_
3 3
4/** 4struct drm_device;
5 * This minimalistic include file is intended for users (read TTM) that 5struct device;
6 * don't want to include the full drmP.h file.
7 */
8 6
9int drm_class_device_register(struct device *dev); 7int drm_class_device_register(struct device *dev);
10void drm_class_device_unregister(struct device *dev); 8void drm_class_device_unregister(struct device *dev);
11 9
10void drm_sysfs_hotplug_event(struct drm_device *dev);
11
12#endif 12#endif
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index 0b1ce05e2c2e..fa07be197945 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -711,6 +711,17 @@ extern int ttm_fbdev_mmap(struct vm_area_struct *vma,
711 struct ttm_buffer_object *bo); 711 struct ttm_buffer_object *bo);
712 712
713/** 713/**
714 * ttm_bo_default_iomem_pfn - get a pfn for a page offset
715 *
716 * @bo: the BO we need to look up the pfn for
717 * @page_offset: offset inside the BO to look up.
718 *
719 * Calculate the PFN for iomem based mappings during page fault
720 */
721unsigned long ttm_bo_default_io_mem_pfn(struct ttm_buffer_object *bo,
722 unsigned long page_offset);
723
724/**
714 * ttm_bo_mmap - mmap out of the ttm device address space. 725 * ttm_bo_mmap - mmap out of the ttm device address space.
715 * 726 *
716 * @filp: filp as input from the mmap method. 727 * @filp: filp as input from the mmap method.
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 3641c6128ac2..6bbd34d25a8d 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -462,6 +462,15 @@ struct ttm_bo_driver {
462 struct ttm_mem_reg *mem); 462 struct ttm_mem_reg *mem);
463 void (*io_mem_free)(struct ttm_bo_device *bdev, 463 void (*io_mem_free)(struct ttm_bo_device *bdev,
464 struct ttm_mem_reg *mem); 464 struct ttm_mem_reg *mem);
465
466 /**
467 * Return the pfn for a given page_offset inside the BO.
468 *
469 * @bo: the BO to look up the pfn for
470 * @page_offset: the offset to look up
471 */
472 unsigned long (*io_mem_pfn)(struct ttm_buffer_object *bo,
473 unsigned long page_offset);
465}; 474};
466 475
467/** 476/**
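
A sketch of a driver filling the new io_mem_pfn hook for a linearly mapped aperture. This mirrors what ttm_bo_default_io_mem_pfn(), declared in the ttm_bo_api.h hunk above, does; drivers with no special needs can simply point the hook at that default:

static unsigned long example_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
					    unsigned long page_offset)
{
	/* Linear aperture: (BAR base + BO offset) in pages, plus offset. */
	return ((bo->mem.bus.base + bo->mem.bus.offset) >> PAGE_SHIFT) +
		page_offset;
}

static struct ttm_bo_driver example_bo_driver = {
	/* ... other hooks ... */
	.io_mem_pfn = example_ttm_io_mem_pfn,
};
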
diff --git a/include/drm/ttm/ttm_object.h b/include/drm/ttm/ttm_object.h
index ed953f98f0e1..1487011fe057 100644
--- a/include/drm/ttm/ttm_object.h
+++ b/include/drm/ttm/ttm_object.h
@@ -229,6 +229,8 @@ extern void ttm_base_object_unref(struct ttm_base_object **p_base);
229 * @ref_type: The type of reference. 229 * @ref_type: The type of reference.
230 * @existed: Upon completion, indicates that an identical reference object 230 * @existed: Upon completion, indicates that an identical reference object
231 * already existed, and the refcount was upped on that object instead. 231 * already existed, and the refcount was upped on that object instead.
232 * @require_existed: Fail with -EPERM if an identical ref object didn't
233 * already exist.
232 * 234 *
233 * Checks that the base object is shareable and adds a ref object to it. 235 * Checks that the base object is shareable and adds a ref object to it.
234 * 236 *
@@ -243,7 +245,8 @@ extern void ttm_base_object_unref(struct ttm_base_object **p_base);
243 */ 245 */
244extern int ttm_ref_object_add(struct ttm_object_file *tfile, 246extern int ttm_ref_object_add(struct ttm_object_file *tfile,
245 struct ttm_base_object *base, 247 struct ttm_base_object *base,
246 enum ttm_ref_type ref_type, bool *existed); 248 enum ttm_ref_type ref_type, bool *existed,
249 bool require_existed);
247 250
248extern bool ttm_ref_object_exists(struct ttm_object_file *tfile, 251extern bool ttm_ref_object_exists(struct ttm_object_file *tfile,
249 struct ttm_base_object *base); 252 struct ttm_base_object *base);
diff --git a/include/drm/ttm/ttm_placement.h b/include/drm/ttm/ttm_placement.h
index 932be0c8086e..e88a8e39767b 100644
--- a/include/drm/ttm/ttm_placement.h
+++ b/include/drm/ttm/ttm_placement.h
@@ -63,6 +63,7 @@
63#define TTM_PL_FLAG_CACHED (1 << 16) 63#define TTM_PL_FLAG_CACHED (1 << 16)
64#define TTM_PL_FLAG_UNCACHED (1 << 17) 64#define TTM_PL_FLAG_UNCACHED (1 << 17)
65#define TTM_PL_FLAG_WC (1 << 18) 65#define TTM_PL_FLAG_WC (1 << 18)
66#define TTM_PL_FLAG_CONTIGUOUS (1 << 19)
66#define TTM_PL_FLAG_NO_EVICT (1 << 21) 67#define TTM_PL_FLAG_NO_EVICT (1 << 21)
67#define TTM_PL_FLAG_TOPDOWN (1 << 22) 68#define TTM_PL_FLAG_TOPDOWN (1 << 22)
68 69
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index b72dd2ad5f44..c0b3d999c266 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -295,6 +295,7 @@ void kvm_vgic_vcpu_early_init(struct kvm_vcpu *vcpu);
295void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu); 295void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu);
296int kvm_vgic_map_resources(struct kvm *kvm); 296int kvm_vgic_map_resources(struct kvm *kvm);
297int kvm_vgic_hyp_init(void); 297int kvm_vgic_hyp_init(void);
298void kvm_vgic_init_cpu_hardware(void);
298 299
299int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid, 300int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid,
300 bool level); 301 bool level);
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index b296a9006117..9382c5da7a2e 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -51,6 +51,7 @@ struct blk_mq_hw_ctx {
51 51
52 atomic_t nr_active; 52 atomic_t nr_active;
53 53
54 struct delayed_work delayed_run_work;
54 struct delayed_work delay_work; 55 struct delayed_work delay_work;
55 56
56 struct hlist_node cpuhp_dead; 57 struct hlist_node cpuhp_dead;
@@ -238,6 +239,7 @@ void blk_mq_stop_hw_queues(struct request_queue *q);
238void blk_mq_start_hw_queues(struct request_queue *q); 239void blk_mq_start_hw_queues(struct request_queue *q);
239void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async); 240void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
240void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async); 241void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
242void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
241void blk_mq_run_hw_queues(struct request_queue *q, bool async); 243void blk_mq_run_hw_queues(struct request_queue *q, bool async);
242void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs); 244void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
243void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset, 245void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 5a7da607ca04..01a696b0a4d3 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -610,7 +610,6 @@ struct request_queue {
610#define QUEUE_FLAG_FLUSH_NQ 25 /* flush not queueuable */ 610#define QUEUE_FLAG_FLUSH_NQ 25 /* flush not queueuable */
611#define QUEUE_FLAG_DAX 26 /* device supports DAX */ 611#define QUEUE_FLAG_DAX 26 /* device supports DAX */
612#define QUEUE_FLAG_STATS 27 /* track rq completion times */ 612#define QUEUE_FLAG_STATS 27 /* track rq completion times */
613#define QUEUE_FLAG_RESTART 28 /* queue needs restart at completion */
614 613
615#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \ 614#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
616 (1 << QUEUE_FLAG_STACKABLE) | \ 615 (1 << QUEUE_FLAG_STACKABLE) | \
@@ -1673,12 +1672,36 @@ static inline bool bios_segs_mergeable(struct request_queue *q,
1673 return true; 1672 return true;
1674} 1673}
1675 1674
1676static inline bool bio_will_gap(struct request_queue *q, struct bio *prev, 1675static inline bool bio_will_gap(struct request_queue *q,
1677 struct bio *next) 1676 struct request *prev_rq,
1677 struct bio *prev,
1678 struct bio *next)
1678{ 1679{
1679 if (bio_has_data(prev) && queue_virt_boundary(q)) { 1680 if (bio_has_data(prev) && queue_virt_boundary(q)) {
1680 struct bio_vec pb, nb; 1681 struct bio_vec pb, nb;
1681 1682
1683 /*
 1684 * Don't merge if the first bio starts at a non-zero
 1685 * offset; otherwise it is quite difficult to respect the
 1686 * sg gap limit. We work hard to merge a huge number of small
 1687 * single bios in case of mkfs.
1688 */
1689 if (prev_rq)
1690 bio_get_first_bvec(prev_rq->bio, &pb);
1691 else
1692 bio_get_first_bvec(prev, &pb);
1693 if (pb.bv_offset)
1694 return true;
1695
1696 /*
 1697 * We don't need to worry about the case where the
 1698 * merged segment ends at an unaligned virt boundary:
1699 *
1700 * - if 'pb' ends aligned, the merged segment ends aligned
1701 * - if 'pb' ends unaligned, the next bio must include
1702 * one single bvec of 'nb', otherwise the 'nb' can't
1703 * merge with 'pb'
1704 */
1682 bio_get_last_bvec(prev, &pb); 1705 bio_get_last_bvec(prev, &pb);
1683 bio_get_first_bvec(next, &nb); 1706 bio_get_first_bvec(next, &nb);
1684 1707
@@ -1691,12 +1714,12 @@ static inline bool bio_will_gap(struct request_queue *q, struct bio *prev,
1691 1714
1692static inline bool req_gap_back_merge(struct request *req, struct bio *bio) 1715static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
1693{ 1716{
1694 return bio_will_gap(req->q, req->biotail, bio); 1717 return bio_will_gap(req->q, req, req->biotail, bio);
1695} 1718}
1696 1719
1697static inline bool req_gap_front_merge(struct request *req, struct bio *bio) 1720static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
1698{ 1721{
1699 return bio_will_gap(req->q, bio, req->bio); 1722 return bio_will_gap(req->q, NULL, bio, req->bio);
1700} 1723}
1701 1724
1702int kblockd_schedule_work(struct work_struct *work); 1725int kblockd_schedule_work(struct work_struct *work);
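
The arithmetic behind these gap helpers, as a simplified standalone sketch: two segments may only be merged across a queue's virt boundary when the first ends on the boundary and the next starts on it. The mask would be queue_virt_boundary(q); the function name is made up, and the real kernel predicate differs in detail:

#include <stdbool.h>

/*
 * Simplified sketch: true if appending a segment at next_offset after a
 * segment [prev_offset, prev_offset + prev_len) would straddle the
 * device's virtual boundary described by mask.
 */
static inline bool example_bvec_gap(unsigned long mask,
				    unsigned int prev_offset,
				    unsigned int prev_len,
				    unsigned int next_offset)
{
	if (!mask)
		return false;	/* no gap constraint on this queue */
	return ((prev_offset + prev_len) & mask) || (next_offset & mask);
}

This also motivates the added pb.bv_offset check in the hunk above: a first segment that starts unaligned makes the later boundary bookkeeping much harder.
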
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index f6b43fbb141c..af9c86e958bd 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -570,6 +570,25 @@ static inline void pr_cont_cgroup_path(struct cgroup *cgrp)
570 pr_cont_kernfs_path(cgrp->kn); 570 pr_cont_kernfs_path(cgrp->kn);
571} 571}
572 572
573static inline void cgroup_init_kthreadd(void)
574{
575 /*
576 * kthreadd is inherited by all kthreads, keep it in the root so
577 * that the new kthreads are guaranteed to stay in the root until
578 * initialization is finished.
579 */
580 current->no_cgroup_migration = 1;
581}
582
583static inline void cgroup_kthread_ready(void)
584{
585 /*
586 * This kthread finished initialization. The creator should have
587 * set PF_NO_SETAFFINITY if this kthread should stay in the root.
588 */
589 current->no_cgroup_migration = 0;
590}
591
573#else /* !CONFIG_CGROUPS */ 592#else /* !CONFIG_CGROUPS */
574 593
575struct cgroup_subsys_state; 594struct cgroup_subsys_state;
@@ -590,6 +609,8 @@ static inline void cgroup_free(struct task_struct *p) {}
590 609
591static inline int cgroup_init_early(void) { return 0; } 610static inline int cgroup_init_early(void) { return 0; }
592static inline int cgroup_init(void) { return 0; } 611static inline int cgroup_init(void) { return 0; }
612static inline void cgroup_init_kthreadd(void) {}
613static inline void cgroup_kthread_ready(void) {}
593 614
594static inline bool task_under_cgroup_hierarchy(struct task_struct *task, 615static inline bool task_under_cgroup_hierarchy(struct task_struct *task,
595 struct cgroup *ancestor) 616 struct cgroup *ancestor)
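
How the two new hooks are meant to pair up, simplified from kernel/kthread.c; the bodies are elided and this is a sketch, not the real functions:

/* Simplified pairing, as used from kernel/kthread.c. */
static int kthread(void *data)
{
	/* ... set up the new kthread ... */
	cgroup_kthread_ready();		/* init done, migration allowed */
	/* ... run the thread function ... */
	return 0;
}

int kthreadd(void *unused)
{
	cgroup_init_kthreadd();		/* pin kthreadd itself to the root */
	for (;;) {
		/* ... create kthreads on request; each runs kthread() ... */
	}
}
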
diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h
index 5d3053c34fb3..6d7edc3082f9 100644
--- a/include/linux/clockchips.h
+++ b/include/linux/clockchips.h
@@ -229,7 +229,7 @@ static inline void tick_setup_hrtimer_broadcast(void) { }
229 229
230#ifdef CONFIG_CLKEVT_PROBE 230#ifdef CONFIG_CLKEVT_PROBE
231extern int clockevent_probe(void); 231extern int clockevent_probe(void);
232#els 232#else
233static inline int clockevent_probe(void) { return 0; } 233static inline int clockevent_probe(void) { return 0; }
234#endif 234#endif
235 235
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index bfb3704fc6fc..79f27d60ec66 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -39,13 +39,13 @@ struct dma_buf_attachment;
39 39
40/** 40/**
41 * struct dma_buf_ops - operations possible on struct dma_buf 41 * struct dma_buf_ops - operations possible on struct dma_buf
42 * @kmap_atomic: maps a page from the buffer into kernel address 42 * @map_atomic: maps a page from the buffer into kernel address
43 * space, users may not block until the subsequent unmap call. 43 * space, users may not block until the subsequent unmap call.
44 * This callback must not sleep. 44 * This callback must not sleep.
45 * @kunmap_atomic: [optional] unmaps a atomically mapped page from the buffer. 45 * @unmap_atomic: [optional] unmaps a atomically mapped page from the buffer.
46 * This Callback must not sleep. 46 * This Callback must not sleep.
47 * @kmap: maps a page from the buffer into kernel address space. 47 * @map: maps a page from the buffer into kernel address space.
48 * @kunmap: [optional] unmaps a page from the buffer. 48 * @unmap: [optional] unmaps a page from the buffer.
49 * @vmap: [optional] creates a virtual mapping for the buffer into kernel 49 * @vmap: [optional] creates a virtual mapping for the buffer into kernel
50 * address space. Same restrictions as for vmap and friends apply. 50 * address space. Same restrictions as for vmap and friends apply.
51 * @vunmap: [optional] unmaps a vmap from the buffer 51 * @vunmap: [optional] unmaps a vmap from the buffer
@@ -206,10 +206,10 @@ struct dma_buf_ops {
206 * to be restarted. 206 * to be restarted.
207 */ 207 */
208 int (*end_cpu_access)(struct dma_buf *, enum dma_data_direction); 208 int (*end_cpu_access)(struct dma_buf *, enum dma_data_direction);
209 void *(*kmap_atomic)(struct dma_buf *, unsigned long); 209 void *(*map_atomic)(struct dma_buf *, unsigned long);
210 void (*kunmap_atomic)(struct dma_buf *, unsigned long, void *); 210 void (*unmap_atomic)(struct dma_buf *, unsigned long, void *);
211 void *(*kmap)(struct dma_buf *, unsigned long); 211 void *(*map)(struct dma_buf *, unsigned long);
212 void (*kunmap)(struct dma_buf *, unsigned long, void *); 212 void (*unmap)(struct dma_buf *, unsigned long, void *);
213 213
214 /** 214 /**
215 * @mmap: 215 * @mmap:
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index aebecc4ed088..22d39e8d4de1 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -211,7 +211,7 @@ extern ssize_t elv_iosched_show(struct request_queue *, char *);
211extern ssize_t elv_iosched_store(struct request_queue *, const char *, size_t); 211extern ssize_t elv_iosched_store(struct request_queue *, const char *, size_t);
212 212
213extern int elevator_init(struct request_queue *, char *); 213extern int elevator_init(struct request_queue *, char *);
214extern void elevator_exit(struct elevator_queue *); 214extern void elevator_exit(struct request_queue *, struct elevator_queue *);
215extern int elevator_change(struct request_queue *, const char *); 215extern int elevator_change(struct request_queue *, const char *);
216extern bool elv_bio_merge_ok(struct request *, struct bio *); 216extern bool elv_bio_merge_ok(struct request *, struct bio *);
217extern struct elevator_queue *elevator_alloc(struct request_queue *, 217extern struct elevator_queue *elevator_alloc(struct request_queue *,
diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h
index eafc965b3eb8..dc30f3d057eb 100644
--- a/include/linux/irqchip/arm-gic.h
+++ b/include/linux/irqchip/arm-gic.h
@@ -96,6 +96,9 @@
96#define GICH_MISR_EOI (1 << 0) 96#define GICH_MISR_EOI (1 << 0)
97#define GICH_MISR_U (1 << 1) 97#define GICH_MISR_U (1 << 1)
98 98
99#define GICV_PMR_PRIORITY_SHIFT 3
100#define GICV_PMR_PRIORITY_MASK (0x1f << GICV_PMR_PRIORITY_SHIFT)
101
99#ifndef __ASSEMBLY__ 102#ifndef __ASSEMBLY__
100 103
101#include <linux/irqdomain.h> 104#include <linux/irqdomain.h>
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index 5734480c9590..a5c7046f26b4 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -76,6 +76,9 @@ size_t ksize(const void *);
76static inline void kasan_unpoison_slab(const void *ptr) { ksize(ptr); } 76static inline void kasan_unpoison_slab(const void *ptr) { ksize(ptr); }
77size_t kasan_metadata_size(struct kmem_cache *cache); 77size_t kasan_metadata_size(struct kmem_cache *cache);
78 78
79bool kasan_save_enable_multi_shot(void);
80void kasan_restore_multi_shot(bool enabled);
81
79#else /* CONFIG_KASAN */ 82#else /* CONFIG_KASAN */
80 83
81static inline void kasan_unpoison_shadow(const void *address, size_t size) {} 84static inline void kasan_unpoison_shadow(const void *address, size_t size) {}
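kasan_save_enable_multi_shot() and kasan_restore_multi_shot() bracket code that intentionally triggers several KASAN reports (their first user is the KASAN self-test). A hedged usage sketch:

bool multishot;

multishot = kasan_save_enable_multi_shot();
/* ... run code that may legitimately produce multiple KASAN reports ... */
kasan_restore_multi_shot(multishot);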
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 2c14ad9809da..d0250744507a 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -162,8 +162,8 @@ int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
162 int len, void *val); 162 int len, void *val);
163int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, 163int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
164 int len, struct kvm_io_device *dev); 164 int len, struct kvm_io_device *dev);
165int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx, 165void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
166 struct kvm_io_device *dev); 166 struct kvm_io_device *dev);
167struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx, 167struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
168 gpa_t addr); 168 gpa_t addr);
169 169
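kvm_io_bus_unregister_dev() now returns void: failure handling moved inside the function, so callers simply stop checking the result. A hedged before/after sketch (the device variable is hypothetical):

/* before: r = kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &dev->io_dev);
 * after:  the call cannot report failure to the caller */
kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &dev->io_dev);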
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 5af377303880..bb7250c45cb8 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -740,6 +740,12 @@ static inline bool mem_cgroup_oom_synchronize(bool wait)
740 return false; 740 return false;
741} 741}
742 742
743static inline void mem_cgroup_update_page_stat(struct page *page,
744 enum mem_cgroup_stat_index idx,
745 int nr)
746{
747}
748
743static inline void mem_cgroup_inc_page_stat(struct page *page, 749static inline void mem_cgroup_inc_page_stat(struct page *page,
744 enum mem_cgroup_stat_index idx) 750 enum mem_cgroup_stat_index idx)
745{ 751{
diff --git a/include/linux/mfd/cros_ec.h b/include/linux/mfd/cros_ec.h
index 7a01c94496f1..3eef9fb9968a 100644
--- a/include/linux/mfd/cros_ec.h
+++ b/include/linux/mfd/cros_ec.h
@@ -35,10 +35,11 @@
35 * Max bus-specific overhead incurred by request/responses. 35 * Max bus-specific overhead incurred by request/responses.
36 * I2C requires 1 additional byte for requests. 36 * I2C requires 1 additional byte for requests.
37 * I2C requires 2 additional bytes for responses. 37 * I2C requires 2 additional bytes for responses.
38 * SPI requires up to 32 additional bytes for responses.
38 * */ 39 * */
39#define EC_PROTO_VERSION_UNKNOWN 0 40#define EC_PROTO_VERSION_UNKNOWN 0
40#define EC_MAX_REQUEST_OVERHEAD 1 41#define EC_MAX_REQUEST_OVERHEAD 1
41#define EC_MAX_RESPONSE_OVERHEAD 2 42#define EC_MAX_RESPONSE_OVERHEAD 32
42 43
43/* 44/*
44 * Command interface between EC and AP, for LPC, I2C and SPI interfaces. 45 * Command interface between EC and AP, for LPC, I2C and SPI interfaces.
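With the SPI worst case folded in, EC_MAX_RESPONSE_OVERHEAD grows to 32 bytes, and receive buffers must be sized for it. A hedged sizing sketch, assuming a 512-byte payload cap and the ec_host_response header from cros_ec_commands.h:

/* illustrative worst-case RX buffer; 512 is an assumed payload limit */
#define EXAMPLE_EC_RX_BUF_SIZE \
	(sizeof(struct ec_host_response) + 512 + EC_MAX_RESPONSE_OVERHEAD)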
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 5f01c88f0800..00a8fa7e366a 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -32,6 +32,8 @@ struct user_struct;
32struct writeback_control; 32struct writeback_control;
33struct bdi_writeback; 33struct bdi_writeback;
34 34
35void init_mm_internals(void);
36
35#ifndef CONFIG_NEED_MULTIPLE_NODES /* Don't use mapnrs, do it properly */ 37#ifndef CONFIG_NEED_MULTIPLE_NODES /* Don't use mapnrs, do it properly */
36extern unsigned long max_mapnr; 38extern unsigned long max_mapnr;
37 39
diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
index 51891fb0d3ce..c91b3bcd158f 100644
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -394,18 +394,6 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
394 ___pud; \ 394 ___pud; \
395}) 395})
396 396
397#define pmdp_huge_get_and_clear_notify(__mm, __haddr, __pmd) \
398({ \
399 unsigned long ___haddr = __haddr & HPAGE_PMD_MASK; \
400 pmd_t ___pmd; \
401 \
402 ___pmd = pmdp_huge_get_and_clear(__mm, __haddr, __pmd); \
403 mmu_notifier_invalidate_range(__mm, ___haddr, \
404 ___haddr + HPAGE_PMD_SIZE); \
405 \
406 ___pmd; \
407})
408
409/* 397/*
410 * set_pte_at_notify() sets the pte _after_ running the notifier. 398 * set_pte_at_notify() sets the pte _after_ running the notifier.
411 * This is safe to start by updating the secondary MMUs, because the primary MMU 399 * This is safe to start by updating the secondary MMUs, because the primary MMU
@@ -489,7 +477,6 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
489#define ptep_clear_flush_notify ptep_clear_flush 477#define ptep_clear_flush_notify ptep_clear_flush
490#define pmdp_huge_clear_flush_notify pmdp_huge_clear_flush 478#define pmdp_huge_clear_flush_notify pmdp_huge_clear_flush
491#define pudp_huge_clear_flush_notify pudp_huge_clear_flush 479#define pudp_huge_clear_flush_notify pudp_huge_clear_flush
492#define pmdp_huge_get_and_clear_notify pmdp_huge_get_and_clear
493#define set_pte_at_notify set_pte_at 480#define set_pte_at_notify set_pte_at
494 481
495#endif /* CONFIG_MMU_NOTIFIER */ 482#endif /* CONFIG_MMU_NOTIFIER */
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index c43d435d4225..9061780b141f 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -64,26 +64,26 @@ enum {
64 * RDMA_QPTYPE field 64 * RDMA_QPTYPE field
65 */ 65 */
66enum { 66enum {
67 NVMF_RDMA_QPTYPE_CONNECTED = 0, /* Reliable Connected */ 67 NVMF_RDMA_QPTYPE_CONNECTED = 1, /* Reliable Connected */
68 NVMF_RDMA_QPTYPE_DATAGRAM = 1, /* Reliable Datagram */ 68 NVMF_RDMA_QPTYPE_DATAGRAM = 2, /* Reliable Datagram */
69}; 69};
70 70
71/* RDMA QP Service Type codes for Discovery Log Page entry TSAS 71/* RDMA QP Service Type codes for Discovery Log Page entry TSAS
72 * RDMA_QPTYPE field 72 * RDMA_QPTYPE field
73 */ 73 */
74enum { 74enum {
75 NVMF_RDMA_PRTYPE_NOT_SPECIFIED = 0, /* No Provider Specified */ 75 NVMF_RDMA_PRTYPE_NOT_SPECIFIED = 1, /* No Provider Specified */
76 NVMF_RDMA_PRTYPE_IB = 1, /* InfiniBand */ 76 NVMF_RDMA_PRTYPE_IB = 2, /* InfiniBand */
77 NVMF_RDMA_PRTYPE_ROCE = 2, /* InfiniBand RoCE */ 77 NVMF_RDMA_PRTYPE_ROCE = 3, /* InfiniBand RoCE */
78 NVMF_RDMA_PRTYPE_ROCEV2 = 3, /* InfiniBand RoCEV2 */ 78 NVMF_RDMA_PRTYPE_ROCEV2 = 4, /* InfiniBand RoCEV2 */
79 NVMF_RDMA_PRTYPE_IWARP = 4, /* IWARP */ 79 NVMF_RDMA_PRTYPE_IWARP = 5, /* IWARP */
80}; 80};
81 81
82/* RDMA Connection Management Service Type codes for Discovery Log Page 82/* RDMA Connection Management Service Type codes for Discovery Log Page
83 * entry TSAS RDMA_CMS field 83 * entry TSAS RDMA_CMS field
84 */ 84 */
85enum { 85enum {
86 NVMF_RDMA_CMS_RDMA_CM = 0, /* Sockets based enpoint addressing */ 86 NVMF_RDMA_CMS_RDMA_CM = 1, /* Sockets based endpoint addressing */
87}; 87};
88 88
89#define NVMF_AQ_DEPTH 32 89#define NVMF_AQ_DEPTH 32
diff --git a/include/linux/pinctrl/pinctrl.h b/include/linux/pinctrl/pinctrl.h
index 8ce2d87a238b..5e45385c5bdc 100644
--- a/include/linux/pinctrl/pinctrl.h
+++ b/include/linux/pinctrl/pinctrl.h
@@ -145,8 +145,9 @@ struct pinctrl_desc {
145extern int pinctrl_register_and_init(struct pinctrl_desc *pctldesc, 145extern int pinctrl_register_and_init(struct pinctrl_desc *pctldesc,
146 struct device *dev, void *driver_data, 146 struct device *dev, void *driver_data,
147 struct pinctrl_dev **pctldev); 147 struct pinctrl_dev **pctldev);
148extern int pinctrl_enable(struct pinctrl_dev *pctldev);
148 149
149/* Please use pinctrl_register_and_init() instead */ 150/* Please use pinctrl_register_and_init() and pinctrl_enable() instead */
150extern struct pinctrl_dev *pinctrl_register(struct pinctrl_desc *pctldesc, 151extern struct pinctrl_dev *pinctrl_register(struct pinctrl_desc *pctldesc,
151 struct device *dev, void *driver_data); 152 struct device *dev, void *driver_data);
152 153
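pinctrl_enable() splits activation out of registration so a driver can finish setting up hardware that its pinctrl callbacks depend on in between. A hedged sketch of the intended two-step probe tail (desc, dev and drvdata are assumed to exist):

ret = pinctrl_register_and_init(&desc, dev, drvdata, &pctldev);
if (ret)
	return ret;

/* ... complete any state the pin control callbacks rely on ... */

return pinctrl_enable(pctldev);	/* controller becomes usable only here */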
diff --git a/include/linux/reset.h b/include/linux/reset.h
index 96fb139bdd08..13d8681210d5 100644
--- a/include/linux/reset.h
+++ b/include/linux/reset.h
@@ -15,6 +15,9 @@ int reset_control_status(struct reset_control *rstc);
15struct reset_control *__of_reset_control_get(struct device_node *node, 15struct reset_control *__of_reset_control_get(struct device_node *node,
16 const char *id, int index, bool shared, 16 const char *id, int index, bool shared,
17 bool optional); 17 bool optional);
18struct reset_control *__reset_control_get(struct device *dev, const char *id,
19 int index, bool shared,
20 bool optional);
18void reset_control_put(struct reset_control *rstc); 21void reset_control_put(struct reset_control *rstc);
19struct reset_control *__devm_reset_control_get(struct device *dev, 22struct reset_control *__devm_reset_control_get(struct device *dev,
20 const char *id, int index, bool shared, 23 const char *id, int index, bool shared,
@@ -72,6 +75,13 @@ static inline struct reset_control *__of_reset_control_get(
72 return optional ? NULL : ERR_PTR(-ENOTSUPP); 75 return optional ? NULL : ERR_PTR(-ENOTSUPP);
73} 76}
74 77
78static inline struct reset_control *__reset_control_get(
79 struct device *dev, const char *id,
80 int index, bool shared, bool optional)
81{
82 return optional ? NULL : ERR_PTR(-ENOTSUPP);
83}
84
75static inline struct reset_control *__devm_reset_control_get( 85static inline struct reset_control *__devm_reset_control_get(
76 struct device *dev, const char *id, 86 struct device *dev, const char *id,
77 int index, bool shared, bool optional) 87 int index, bool shared, bool optional)
@@ -102,8 +112,7 @@ __must_check reset_control_get_exclusive(struct device *dev, const char *id)
102#ifndef CONFIG_RESET_CONTROLLER 112#ifndef CONFIG_RESET_CONTROLLER
103 WARN_ON(1); 113 WARN_ON(1);
104#endif 114#endif
105 return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, false, 115 return __reset_control_get(dev, id, 0, false, false);
106 false);
107} 116}
108 117
109/** 118/**
@@ -131,22 +140,19 @@ __must_check reset_control_get_exclusive(struct device *dev, const char *id)
131static inline struct reset_control *reset_control_get_shared( 140static inline struct reset_control *reset_control_get_shared(
132 struct device *dev, const char *id) 141 struct device *dev, const char *id)
133{ 142{
134 return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, true, 143 return __reset_control_get(dev, id, 0, true, false);
135 false);
136} 144}
137 145
138static inline struct reset_control *reset_control_get_optional_exclusive( 146static inline struct reset_control *reset_control_get_optional_exclusive(
139 struct device *dev, const char *id) 147 struct device *dev, const char *id)
140{ 148{
141 return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, false, 149 return __reset_control_get(dev, id, 0, false, true);
142 true);
143} 150}
144 151
145static inline struct reset_control *reset_control_get_optional_shared( 152static inline struct reset_control *reset_control_get_optional_shared(
146 struct device *dev, const char *id) 153 struct device *dev, const char *id)
147{ 154{
148 return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, true, 155 return __reset_control_get(dev, id, 0, true, true);
149 true);
150} 156}
151 157
152/** 158/**
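Every inline getter now funnels through __reset_control_get(), which takes the struct device itself instead of dev->of_node, leaving room for non-DT lookup later. Consumers are unaffected; a minimal hedged usage sketch (the "bus" id is made up):

struct reset_control *rstc;

rstc = reset_control_get_shared(dev, "bus");	/* hypothetical reset id */
if (IS_ERR(rstc))
	return PTR_ERR(rstc);
return reset_control_deassert(rstc);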
diff --git a/include/linux/sched.h b/include/linux/sched.h
index d67eee84fd43..4cf9a59a4d08 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -604,6 +604,10 @@ struct task_struct {
604#ifdef CONFIG_COMPAT_BRK 604#ifdef CONFIG_COMPAT_BRK
605 unsigned brk_randomized:1; 605 unsigned brk_randomized:1;
606#endif 606#endif
607#ifdef CONFIG_CGROUPS
608 /* disallow userland-initiated cgroup migration */
609 unsigned no_cgroup_migration:1;
610#endif
607 611
608 unsigned long atomic_flags; /* Flags requiring atomic access. */ 612 unsigned long atomic_flags; /* Flags requiring atomic access. */
609 613
diff --git a/include/linux/sched/clock.h b/include/linux/sched/clock.h
index 4a68c6791207..34fe92ce1ebd 100644
--- a/include/linux/sched/clock.h
+++ b/include/linux/sched/clock.h
@@ -54,15 +54,16 @@ static inline u64 local_clock(void)
54} 54}
55#else 55#else
56extern void sched_clock_init_late(void); 56extern void sched_clock_init_late(void);
57/*
58 * Architectures can set this to 1 if they have specified
59 * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their arch Kconfig,
60 * but then during bootup it turns out that sched_clock()
61 * is reliable after all:
62 */
63extern int sched_clock_stable(void); 57extern int sched_clock_stable(void);
64extern void clear_sched_clock_stable(void); 58extern void clear_sched_clock_stable(void);
65 59
60/*
61 * When sched_clock_stable(), __sched_clock_offset provides the offset
62 * between local_clock() and sched_clock().
63 */
64extern u64 __sched_clock_offset;
65
66
66extern void sched_clock_tick(void); 67extern void sched_clock_tick(void);
67extern void sched_clock_idle_sleep_event(void); 68extern void sched_clock_idle_sleep_event(void);
68extern void sched_clock_idle_wakeup_event(u64 delta_ns); 69extern void sched_clock_idle_wakeup_event(u64 delta_ns);
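The new export makes the stable-clock invariant usable outside kernel/sched/clock.c: once sched_clock_stable() is true, the per-CPU clock is just sched_clock() plus a constant. An illustrative restatement, not in-tree code:

/* while sched_clock_stable():
 *   sched_clock_cpu(cpu) == sched_clock() + __sched_clock_offset
 *   ktime_get_ns() + __gtod_offset == sched_clock() + __sched_clock_offset
 */
static u64 example_stable_clock(void)
{
	return sched_clock() + __sched_clock_offset;
}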
diff --git a/include/linux/stat.h b/include/linux/stat.h
index c76e524fb34b..64b6b3aece21 100644
--- a/include/linux/stat.h
+++ b/include/linux/stat.h
@@ -26,6 +26,7 @@ struct kstat {
26 unsigned int nlink; 26 unsigned int nlink;
27 uint32_t blksize; /* Preferred I/O size */ 27 uint32_t blksize; /* Preferred I/O size */
28 u64 attributes; 28 u64 attributes;
29 u64 attributes_mask;
29#define KSTAT_ATTR_FS_IOC_FLAGS \ 30#define KSTAT_ATTR_FS_IOC_FLAGS \
30 (STATX_ATTR_COMPRESSED | \ 31 (STATX_ATTR_COMPRESSED | \
31 STATX_ATTR_IMMUTABLE | \ 32 STATX_ATTR_IMMUTABLE | \
diff --git a/include/linux/uio.h b/include/linux/uio.h
index 804e34c6f981..f2d36a3d3005 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -39,7 +39,10 @@ struct iov_iter {
39 }; 39 };
40 union { 40 union {
41 unsigned long nr_segs; 41 unsigned long nr_segs;
42 int idx; 42 struct {
43 int idx;
44 int start_idx;
45 };
43 }; 46 };
44}; 47};
45 48
@@ -81,6 +84,7 @@ unsigned long iov_shorten(struct iovec *iov, unsigned long nr_segs, size_t to);
81size_t iov_iter_copy_from_user_atomic(struct page *page, 84size_t iov_iter_copy_from_user_atomic(struct page *page,
82 struct iov_iter *i, unsigned long offset, size_t bytes); 85 struct iov_iter *i, unsigned long offset, size_t bytes);
83void iov_iter_advance(struct iov_iter *i, size_t bytes); 86void iov_iter_advance(struct iov_iter *i, size_t bytes);
87void iov_iter_revert(struct iov_iter *i, size_t bytes);
84int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes); 88int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes);
85size_t iov_iter_single_seg_count(const struct iov_iter *i); 89size_t iov_iter_single_seg_count(const struct iov_iter *i);
86size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes, 90size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
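iov_iter_revert() rewinds an iterator by a number of bytes already consumed, which is why the pipe/iovec index union grows a start_idx to walk back from. A hedged advance/revert pairing (the failure flag is hypothetical):

size_t copied;

copied = copy_to_iter(kbuf, len, iter);	/* advances the iterator */
if (copied && later_step_failed)
	iov_iter_revert(iter, copied);	/* undo the partial consumption */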
diff --git a/include/linux/virtio.h b/include/linux/virtio.h
index 04b0d3f95043..7edfbdb55a99 100644
--- a/include/linux/virtio.h
+++ b/include/linux/virtio.h
@@ -167,6 +167,7 @@ struct virtio_driver {
167 unsigned int feature_table_size; 167 unsigned int feature_table_size;
168 const unsigned int *feature_table_legacy; 168 const unsigned int *feature_table_legacy;
169 unsigned int feature_table_size_legacy; 169 unsigned int feature_table_size_legacy;
170 int (*validate)(struct virtio_device *dev);
170 int (*probe)(struct virtio_device *dev); 171 int (*probe)(struct virtio_device *dev);
171 void (*scan)(struct virtio_device *dev); 172 void (*scan)(struct virtio_device *dev);
172 void (*remove)(struct virtio_device *dev); 173 void (*remove)(struct virtio_device *dev);
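The new ->validate() hook runs after the device's features are read but before the driver commits to them, letting a driver veto or adjust features early. A hedged skeleton (all names here are hypothetical):

static int example_validate(struct virtio_device *vdev)
{
	/* e.g. refuse devices that lack a feature this driver requires */
	if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
		return -EINVAL;
	return 0;
}

static int example_probe(struct virtio_device *vdev)
{
	return 0;
}

static struct virtio_driver example_driver = {
	/* ... driver.name, id_table, feature_table ... */
	.validate = example_validate,
	.probe    = example_probe,
};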
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index 1f71ee5ab518..069582ee5d7f 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -448,10 +448,9 @@ static inline int sctp_frag_point(const struct sctp_association *asoc, int pmtu)
448 return frag; 448 return frag;
449} 449}
450 450
451static inline void sctp_assoc_pending_pmtu(struct sock *sk, struct sctp_association *asoc) 451static inline void sctp_assoc_pending_pmtu(struct sctp_association *asoc)
452{ 452{
453 453 sctp_assoc_sync_pmtu(asoc);
454 sctp_assoc_sync_pmtu(sk, asoc);
455 asoc->pmtu_pending = 0; 454 asoc->pmtu_pending = 0;
456} 455}
457 456
@@ -596,12 +595,23 @@ static inline void sctp_v4_map_v6(union sctp_addr *addr)
596 */ 595 */
597static inline struct dst_entry *sctp_transport_dst_check(struct sctp_transport *t) 596static inline struct dst_entry *sctp_transport_dst_check(struct sctp_transport *t)
598{ 597{
599 if (t->dst && (!dst_check(t->dst, t->dst_cookie) || 598 if (t->dst && !dst_check(t->dst, t->dst_cookie))
600 t->pathmtu != max_t(size_t, SCTP_TRUNC4(dst_mtu(t->dst)),
601 SCTP_DEFAULT_MINSEGMENT)))
602 sctp_transport_dst_release(t); 599 sctp_transport_dst_release(t);
603 600
604 return t->dst; 601 return t->dst;
605} 602}
606 603
604static inline bool sctp_transport_pmtu_check(struct sctp_transport *t)
605{
606 __u32 pmtu = max_t(size_t, SCTP_TRUNC4(dst_mtu(t->dst)),
607 SCTP_DEFAULT_MINSEGMENT);
608
609 if (t->pathmtu == pmtu)
610 return true;
611
612 t->pathmtu = pmtu;
613
614 return false;
615}
616
607#endif /* __net_sctp_h__ */ 617#endif /* __net_sctp_h__ */
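sctp_transport_pmtu_check() returns true when the cached pathmtu still matches the route and refreshes it (returning false) when it was stale, separating that concern from the dst validity check above. A hedged caller sketch that mirrors, but does not copy, the in-tree usage:

if (t->dst && !sctp_transport_pmtu_check(t))
	sctp_assoc_sync_pmtu(t->asoc);	/* pathmtu changed; propagate it */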
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 592decebac75..138f8615acf0 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -377,7 +377,8 @@ typedef struct sctp_sender_hb_info {
377 __u64 hb_nonce; 377 __u64 hb_nonce;
378} sctp_sender_hb_info_t; 378} sctp_sender_hb_info_t;
379 379
380struct sctp_stream *sctp_stream_new(__u16 incnt, __u16 outcnt, gfp_t gfp); 380int sctp_stream_new(struct sctp_association *asoc, gfp_t gfp);
381int sctp_stream_init(struct sctp_association *asoc, gfp_t gfp);
381void sctp_stream_free(struct sctp_stream *stream); 382void sctp_stream_free(struct sctp_stream *stream);
382void sctp_stream_clear(struct sctp_stream *stream); 383void sctp_stream_clear(struct sctp_stream *stream);
383 384
@@ -499,7 +500,6 @@ struct sctp_datamsg {
 499 /* Did the message fail to send? */ 500 /* Did the message fail to send? */
500 int send_error; 501 int send_error;
501 u8 send_failed:1, 502 u8 send_failed:1,
502 force_delay:1,
503 can_delay; /* should this message be Nagle delayed */ 503 can_delay; /* should this message be Nagle delayed */
504}; 504};
505 505
@@ -952,8 +952,8 @@ void sctp_transport_lower_cwnd(struct sctp_transport *, sctp_lower_cwnd_t);
952void sctp_transport_burst_limited(struct sctp_transport *); 952void sctp_transport_burst_limited(struct sctp_transport *);
953void sctp_transport_burst_reset(struct sctp_transport *); 953void sctp_transport_burst_reset(struct sctp_transport *);
954unsigned long sctp_transport_timeout(struct sctp_transport *); 954unsigned long sctp_transport_timeout(struct sctp_transport *);
955void sctp_transport_reset(struct sctp_transport *); 955void sctp_transport_reset(struct sctp_transport *t);
956void sctp_transport_update_pmtu(struct sock *, struct sctp_transport *, u32); 956void sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu);
957void sctp_transport_immediate_rtx(struct sctp_transport *); 957void sctp_transport_immediate_rtx(struct sctp_transport *);
958void sctp_transport_dst_release(struct sctp_transport *t); 958void sctp_transport_dst_release(struct sctp_transport *t);
959void sctp_transport_dst_confirm(struct sctp_transport *t); 959void sctp_transport_dst_confirm(struct sctp_transport *t);
@@ -1878,6 +1878,7 @@ struct sctp_association {
1878 1878
1879 __u8 need_ecne:1, /* Need to send an ECNE Chunk? */ 1879 __u8 need_ecne:1, /* Need to send an ECNE Chunk? */
1880 temp:1, /* Is it a temporary association? */ 1880 temp:1, /* Is it a temporary association? */
1881 force_delay:1,
1881 prsctp_enable:1, 1882 prsctp_enable:1,
1882 reconf_enable:1; 1883 reconf_enable:1;
1883 1884
@@ -1953,7 +1954,7 @@ void sctp_assoc_update(struct sctp_association *old,
1953 1954
1954__u32 sctp_association_get_next_tsn(struct sctp_association *); 1955__u32 sctp_association_get_next_tsn(struct sctp_association *);
1955 1956
1956void sctp_assoc_sync_pmtu(struct sock *, struct sctp_association *); 1957void sctp_assoc_sync_pmtu(struct sctp_association *asoc);
1957void sctp_assoc_rwnd_increase(struct sctp_association *, unsigned int); 1958void sctp_assoc_rwnd_increase(struct sctp_association *, unsigned int);
1958void sctp_assoc_rwnd_decrease(struct sctp_association *, unsigned int); 1959void sctp_assoc_rwnd_decrease(struct sctp_association *, unsigned int);
1959void sctp_assoc_set_primary(struct sctp_association *, 1960void sctp_assoc_set_primary(struct sctp_association *,
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 4b784b6e21c0..ccfad0e9c2cd 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -117,6 +117,7 @@ enum transport_state_table {
117 TRANSPORT_ISTATE_PROCESSING = 11, 117 TRANSPORT_ISTATE_PROCESSING = 11,
118 TRANSPORT_COMPLETE_QF_WP = 18, 118 TRANSPORT_COMPLETE_QF_WP = 18,
119 TRANSPORT_COMPLETE_QF_OK = 19, 119 TRANSPORT_COMPLETE_QF_OK = 19,
120 TRANSPORT_COMPLETE_QF_ERR = 20,
120}; 121};
121 122
122/* Used for struct se_cmd->se_cmd_flags */ 123/* Used for struct se_cmd->se_cmd_flags */
@@ -279,8 +280,6 @@ struct t10_alua_tg_pt_gp {
279 u16 tg_pt_gp_id; 280 u16 tg_pt_gp_id;
280 int tg_pt_gp_valid_id; 281 int tg_pt_gp_valid_id;
281 int tg_pt_gp_alua_supported_states; 282 int tg_pt_gp_alua_supported_states;
282 int tg_pt_gp_alua_pending_state;
283 int tg_pt_gp_alua_previous_state;
284 int tg_pt_gp_alua_access_status; 283 int tg_pt_gp_alua_access_status;
285 int tg_pt_gp_alua_access_type; 284 int tg_pt_gp_alua_access_type;
286 int tg_pt_gp_nonop_delay_msecs; 285 int tg_pt_gp_nonop_delay_msecs;
@@ -289,18 +288,16 @@ struct t10_alua_tg_pt_gp {
289 int tg_pt_gp_pref; 288 int tg_pt_gp_pref;
290 int tg_pt_gp_write_metadata; 289 int tg_pt_gp_write_metadata;
291 u32 tg_pt_gp_members; 290 u32 tg_pt_gp_members;
292 atomic_t tg_pt_gp_alua_access_state; 291 int tg_pt_gp_alua_access_state;
293 atomic_t tg_pt_gp_ref_cnt; 292 atomic_t tg_pt_gp_ref_cnt;
294 spinlock_t tg_pt_gp_lock; 293 spinlock_t tg_pt_gp_lock;
295 struct mutex tg_pt_gp_md_mutex; 294 struct mutex tg_pt_gp_transition_mutex;
296 struct se_device *tg_pt_gp_dev; 295 struct se_device *tg_pt_gp_dev;
297 struct config_group tg_pt_gp_group; 296 struct config_group tg_pt_gp_group;
298 struct list_head tg_pt_gp_list; 297 struct list_head tg_pt_gp_list;
299 struct list_head tg_pt_gp_lun_list; 298 struct list_head tg_pt_gp_lun_list;
300 struct se_lun *tg_pt_gp_alua_lun; 299 struct se_lun *tg_pt_gp_alua_lun;
301 struct se_node_acl *tg_pt_gp_alua_nacl; 300 struct se_node_acl *tg_pt_gp_alua_nacl;
302 struct work_struct tg_pt_gp_transition_work;
303 struct completion *tg_pt_gp_transition_complete;
304}; 301};
305 302
306struct t10_vpd { 303struct t10_vpd {
@@ -705,6 +702,7 @@ struct se_lun {
705 u64 unpacked_lun; 702 u64 unpacked_lun;
706#define SE_LUN_LINK_MAGIC 0xffff7771 703#define SE_LUN_LINK_MAGIC 0xffff7771
707 u32 lun_link_magic; 704 u32 lun_link_magic;
705 bool lun_shutdown;
708 bool lun_access_ro; 706 bool lun_access_ro;
709 u32 lun_index; 707 u32 lun_index;
710 708
diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h
index b2c52843bc70..42d9f64ce416 100644
--- a/include/uapi/drm/drm.h
+++ b/include/uapi/drm/drm.h
@@ -647,6 +647,7 @@ struct drm_gem_open {
647#define DRM_CAP_CURSOR_HEIGHT 0x9 647#define DRM_CAP_CURSOR_HEIGHT 0x9
648#define DRM_CAP_ADDFB2_MODIFIERS 0x10 648#define DRM_CAP_ADDFB2_MODIFIERS 0x10
649#define DRM_CAP_PAGE_FLIP_TARGET 0x11 649#define DRM_CAP_PAGE_FLIP_TARGET 0x11
650#define DRM_CAP_CRTC_IN_VBLANK_EVENT 0x12
650 651
651/** DRM_IOCTL_GET_CAP ioctl argument type */ 652/** DRM_IOCTL_GET_CAP ioctl argument type */
652struct drm_get_cap { 653struct drm_get_cap {
@@ -851,7 +852,7 @@ struct drm_event_vblank {
851 __u32 tv_sec; 852 __u32 tv_sec;
852 __u32 tv_usec; 853 __u32 tv_usec;
853 __u32 sequence; 854 __u32 sequence;
854 __u32 reserved; 855 __u32 crtc_id; /* 0 on older kernels that do not support this */
855}; 856};
856 857
857/* typedef area */ 858/* typedef area */
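Userspace should only trust crtc_id after confirming the new capability; older kernels report zero there. A hedged userspace sketch:

struct drm_get_cap cap = { .capability = DRM_CAP_CRTC_IN_VBLANK_EVENT };
struct drm_event_vblank ev;	/* assume this was read() from the DRM fd */

if (ioctl(fd, DRM_IOCTL_GET_CAP, &cap) == 0 && cap.value)
	printf("vblank completed on CRTC %u\n", ev.crtc_id);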
diff --git a/include/uapi/drm/etnaviv_drm.h b/include/uapi/drm/etnaviv_drm.h
index 2584c1cca42f..76f6f78a352b 100644
--- a/include/uapi/drm/etnaviv_drm.h
+++ b/include/uapi/drm/etnaviv_drm.h
@@ -154,6 +154,12 @@ struct drm_etnaviv_gem_submit_bo {
154 * one or more cmdstream buffers. This allows for conditional execution 154 * one or more cmdstream buffers. This allows for conditional execution
155 * (context-restore), and IB buffers needed for per tile/bin draw cmds. 155 * (context-restore), and IB buffers needed for per tile/bin draw cmds.
156 */ 156 */
157#define ETNA_SUBMIT_NO_IMPLICIT 0x0001
158#define ETNA_SUBMIT_FENCE_FD_IN 0x0002
159#define ETNA_SUBMIT_FENCE_FD_OUT 0x0004
160#define ETNA_SUBMIT_FLAGS (ETNA_SUBMIT_NO_IMPLICIT | \
161 ETNA_SUBMIT_FENCE_FD_IN | \
162 ETNA_SUBMIT_FENCE_FD_OUT)
157#define ETNA_PIPE_3D 0x00 163#define ETNA_PIPE_3D 0x00
158#define ETNA_PIPE_2D 0x01 164#define ETNA_PIPE_2D 0x01
159#define ETNA_PIPE_VG 0x02 165#define ETNA_PIPE_VG 0x02
@@ -167,6 +173,8 @@ struct drm_etnaviv_gem_submit {
167 __u64 bos; /* in, ptr to array of submit_bo's */ 173 __u64 bos; /* in, ptr to array of submit_bo's */
168 __u64 relocs; /* in, ptr to array of submit_reloc's */ 174 __u64 relocs; /* in, ptr to array of submit_reloc's */
169 __u64 stream; /* in, ptr to cmdstream */ 175 __u64 stream; /* in, ptr to cmdstream */
176 __u32 flags; /* in, mask of ETNA_SUBMIT_x */
177 __s32 fence_fd; /* in/out, fence fd (see ETNA_SUBMIT_FENCE_FD_x) */
170}; 178};
171 179
172/* The normal way to synchronize with the GPU is just to CPU_PREP on 180/* The normal way to synchronize with the GPU is just to CPU_PREP on
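The submit ioctl gains explicit fencing: flags select whether fence_fd is consumed as an in-fence, returned as an out-fence, or both. A hedged userspace sketch requesting an out-fence (setup of bos/relocs/stream omitted):

struct drm_etnaviv_gem_submit req = {
	/* ... pipe, bos, relocs, stream filled in as before ... */
	.flags    = ETNA_SUBMIT_FENCE_FD_OUT,
	.fence_fd = -1,	/* ignored on input when only FD_OUT is set */
};

if (ioctl(fd, DRM_IOCTL_ETNAVIV_GEM_SUBMIT, &req) == 0)
	wait_on_fence_fd(req.fence_fd);	/* hypothetical helper */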
diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h
index 4d5d6a2bc59e..a4a189a240d7 100644
--- a/include/uapi/drm/msm_drm.h
+++ b/include/uapi/drm/msm_drm.h
@@ -72,6 +72,7 @@ struct drm_msm_timespec {
72#define MSM_PARAM_CHIP_ID 0x03 72#define MSM_PARAM_CHIP_ID 0x03
73#define MSM_PARAM_MAX_FREQ 0x04 73#define MSM_PARAM_MAX_FREQ 0x04
74#define MSM_PARAM_TIMESTAMP 0x05 74#define MSM_PARAM_TIMESTAMP 0x05
75#define MSM_PARAM_GMEM_BASE 0x06
75 76
76struct drm_msm_param { 77struct drm_msm_param {
77 __u32 pipe; /* in, MSM_PIPE_x */ 78 __u32 pipe; /* in, MSM_PIPE_x */
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index dd9820b1c779..f8d9fed17ba9 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -445,6 +445,7 @@ header-y += unistd.h
445header-y += unix_diag.h 445header-y += unix_diag.h
446header-y += usbdevice_fs.h 446header-y += usbdevice_fs.h
447header-y += usbip.h 447header-y += usbip.h
448header-y += userio.h
448header-y += utime.h 449header-y += utime.h
449header-y += utsname.h 450header-y += utsname.h
450header-y += uuid.h 451header-y += uuid.h
diff --git a/include/uapi/linux/media-bus-format.h b/include/uapi/linux/media-bus-format.h
index 2168759c1287..ef6fb307d2ce 100644
--- a/include/uapi/linux/media-bus-format.h
+++ b/include/uapi/linux/media-bus-format.h
@@ -33,7 +33,7 @@
33 33
34#define MEDIA_BUS_FMT_FIXED 0x0001 34#define MEDIA_BUS_FMT_FIXED 0x0001
35 35
36/* RGB - next is 0x1018 */ 36/* RGB - next is 0x101b */
37#define MEDIA_BUS_FMT_RGB444_1X12 0x1016 37#define MEDIA_BUS_FMT_RGB444_1X12 0x1016
38#define MEDIA_BUS_FMT_RGB444_2X8_PADHI_BE 0x1001 38#define MEDIA_BUS_FMT_RGB444_2X8_PADHI_BE 0x1001
39#define MEDIA_BUS_FMT_RGB444_2X8_PADHI_LE 0x1002 39#define MEDIA_BUS_FMT_RGB444_2X8_PADHI_LE 0x1002
@@ -57,8 +57,11 @@
57#define MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA 0x1012 57#define MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA 0x1012
58#define MEDIA_BUS_FMT_ARGB8888_1X32 0x100d 58#define MEDIA_BUS_FMT_ARGB8888_1X32 0x100d
59#define MEDIA_BUS_FMT_RGB888_1X32_PADHI 0x100f 59#define MEDIA_BUS_FMT_RGB888_1X32_PADHI 0x100f
60#define MEDIA_BUS_FMT_RGB101010_1X30 0x1018
61#define MEDIA_BUS_FMT_RGB121212_1X36 0x1019
62#define MEDIA_BUS_FMT_RGB161616_1X48 0x101a
60 63
61/* YUV (including grey) - next is 0x2026 */ 64/* YUV (including grey) - next is 0x202c */
62#define MEDIA_BUS_FMT_Y8_1X8 0x2001 65#define MEDIA_BUS_FMT_Y8_1X8 0x2001
63#define MEDIA_BUS_FMT_UV8_1X8 0x2015 66#define MEDIA_BUS_FMT_UV8_1X8 0x2015
64#define MEDIA_BUS_FMT_UYVY8_1_5X8 0x2002 67#define MEDIA_BUS_FMT_UYVY8_1_5X8 0x2002
@@ -90,12 +93,18 @@
90#define MEDIA_BUS_FMT_YVYU10_1X20 0x200e 93#define MEDIA_BUS_FMT_YVYU10_1X20 0x200e
91#define MEDIA_BUS_FMT_VUY8_1X24 0x2024 94#define MEDIA_BUS_FMT_VUY8_1X24 0x2024
92#define MEDIA_BUS_FMT_YUV8_1X24 0x2025 95#define MEDIA_BUS_FMT_YUV8_1X24 0x2025
96#define MEDIA_BUS_FMT_UYYVYY8_0_5X24 0x2026
93#define MEDIA_BUS_FMT_UYVY12_1X24 0x2020 97#define MEDIA_BUS_FMT_UYVY12_1X24 0x2020
94#define MEDIA_BUS_FMT_VYUY12_1X24 0x2021 98#define MEDIA_BUS_FMT_VYUY12_1X24 0x2021
95#define MEDIA_BUS_FMT_YUYV12_1X24 0x2022 99#define MEDIA_BUS_FMT_YUYV12_1X24 0x2022
96#define MEDIA_BUS_FMT_YVYU12_1X24 0x2023 100#define MEDIA_BUS_FMT_YVYU12_1X24 0x2023
97#define MEDIA_BUS_FMT_YUV10_1X30 0x2016 101#define MEDIA_BUS_FMT_YUV10_1X30 0x2016
102#define MEDIA_BUS_FMT_UYYVYY10_0_5X30 0x2027
98#define MEDIA_BUS_FMT_AYUV8_1X32 0x2017 103#define MEDIA_BUS_FMT_AYUV8_1X32 0x2017
104#define MEDIA_BUS_FMT_UYYVYY12_0_5X36 0x2028
105#define MEDIA_BUS_FMT_YUV12_1X36 0x2029
106#define MEDIA_BUS_FMT_YUV16_1X48 0x202a
107#define MEDIA_BUS_FMT_UYYVYY16_0_5X48 0x202b
99 108
100/* Bayer - next is 0x3021 */ 109/* Bayer - next is 0x3021 */
101#define MEDIA_BUS_FMT_SBGGR8_1X8 0x3001 110#define MEDIA_BUS_FMT_SBGGR8_1X8 0x3001
diff --git a/include/uapi/linux/stat.h b/include/uapi/linux/stat.h
index 51a6b86e3700..d538897b8e08 100644
--- a/include/uapi/linux/stat.h
+++ b/include/uapi/linux/stat.h
@@ -114,7 +114,7 @@ struct statx {
114 __u64 stx_ino; /* Inode number */ 114 __u64 stx_ino; /* Inode number */
115 __u64 stx_size; /* File size */ 115 __u64 stx_size; /* File size */
116 __u64 stx_blocks; /* Number of 512-byte blocks allocated */ 116 __u64 stx_blocks; /* Number of 512-byte blocks allocated */
117 __u64 __spare1[1]; 117 __u64 stx_attributes_mask; /* Mask to show what's supported in stx_attributes */
118 /* 0x40 */ 118 /* 0x40 */
119 struct statx_timestamp stx_atime; /* Last access time */ 119 struct statx_timestamp stx_atime; /* Last access time */
120 struct statx_timestamp stx_btime; /* File creation time */ 120 struct statx_timestamp stx_btime; /* File creation time */
@@ -152,9 +152,10 @@ struct statx {
152#define STATX_BASIC_STATS 0x000007ffU /* The stuff in the normal stat struct */ 152#define STATX_BASIC_STATS 0x000007ffU /* The stuff in the normal stat struct */
153#define STATX_BTIME 0x00000800U /* Want/got stx_btime */ 153#define STATX_BTIME 0x00000800U /* Want/got stx_btime */
154#define STATX_ALL 0x00000fffU /* All currently supported flags */ 154#define STATX_ALL 0x00000fffU /* All currently supported flags */
155#define STATX__RESERVED 0x80000000U /* Reserved for future struct statx expansion */
155 156
156/* 157/*
157 * Attributes to be found in stx_attributes 158 * Attributes to be found in stx_attributes and masked in stx_attributes_mask.
158 * 159 *
159 * These give information about the features or the state of a file that might 160 * These give information about the features or the state of a file that might
160 * be of use to ordinary userspace programs such as GUIs or ls rather than 161 * be of use to ordinary userspace programs such as GUIs or ls rather than
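stx_attributes_mask lets userspace tell a genuinely clear attribute bit apart from one the filesystem never reports. A hedged userspace sketch (assume stx was filled by a successful statx() call):

struct statx stx;

/* ... statx(AT_FDCWD, path, 0, STATX_ALL, &stx) succeeded ... */
if (stx.stx_attributes_mask & STATX_ATTR_IMMUTABLE) {
	int immutable = !!(stx.stx_attributes & STATX_ATTR_IMMUTABLE);
	/* 'immutable' is meaningful only inside this branch */
}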
diff --git a/include/uapi/linux/virtio_pci.h b/include/uapi/linux/virtio_pci.h
index 15b4385a2be1..90007a1abcab 100644
--- a/include/uapi/linux/virtio_pci.h
+++ b/include/uapi/linux/virtio_pci.h
@@ -79,7 +79,7 @@
79 * configuration space */ 79 * configuration space */
80#define VIRTIO_PCI_CONFIG_OFF(msix_enabled) ((msix_enabled) ? 24 : 20) 80#define VIRTIO_PCI_CONFIG_OFF(msix_enabled) ((msix_enabled) ? 24 : 20)
81/* Deprecated: please use VIRTIO_PCI_CONFIG_OFF instead */ 81/* Deprecated: please use VIRTIO_PCI_CONFIG_OFF instead */
82#define VIRTIO_PCI_CONFIG(dev) VIRTIO_PCI_CONFIG_OFF((dev)->pci_dev->msix_enabled) 82#define VIRTIO_PCI_CONFIG(dev) VIRTIO_PCI_CONFIG_OFF((dev)->msix_enabled)
83 83
84/* Virtio ABI version, this must match exactly */ 84/* Virtio ABI version, this must match exactly */
85#define VIRTIO_PCI_ABI_VERSION 0 85#define VIRTIO_PCI_ABI_VERSION 0
diff --git a/init/main.c b/init/main.c
index f9c9d9948203..b0c11cbf5ddf 100644
--- a/init/main.c
+++ b/init/main.c
@@ -1022,6 +1022,8 @@ static noinline void __init kernel_init_freeable(void)
1022 1022
1023 workqueue_init(); 1023 workqueue_init();
1024 1024
1025 init_mm_internals();
1026
1025 do_pre_smp_initcalls(); 1027 do_pre_smp_initcalls();
1026 lockup_detector_init(); 1028 lockup_detector_init();
1027 1029
diff --git a/kernel/audit.c b/kernel/audit.c
index 2f4964cfde0b..a871bf80fde1 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -160,7 +160,6 @@ static LIST_HEAD(audit_freelist);
160 160
161/* queue msgs to send via kauditd_task */ 161/* queue msgs to send via kauditd_task */
162static struct sk_buff_head audit_queue; 162static struct sk_buff_head audit_queue;
163static void kauditd_hold_skb(struct sk_buff *skb);
164/* queue msgs due to temporary unicast send problems */ 163/* queue msgs due to temporary unicast send problems */
165static struct sk_buff_head audit_retry_queue; 164static struct sk_buff_head audit_retry_queue;
166/* queue msgs waiting for new auditd connection */ 165/* queue msgs waiting for new auditd connection */
@@ -454,30 +453,6 @@ static void auditd_set(int pid, u32 portid, struct net *net)
454} 453}
455 454
456/** 455/**
457 * auditd_reset - Disconnect the auditd connection
458 *
459 * Description:
460 * Break the auditd/kauditd connection and move all the queued records into the
461 * hold queue in case auditd reconnects.
462 */
463static void auditd_reset(void)
464{
465 struct sk_buff *skb;
466
467 /* if it isn't already broken, break the connection */
468 rcu_read_lock();
469 if (auditd_conn.pid)
470 auditd_set(0, 0, NULL);
471 rcu_read_unlock();
472
473 /* flush all of the main and retry queues to the hold queue */
474 while ((skb = skb_dequeue(&audit_retry_queue)))
475 kauditd_hold_skb(skb);
476 while ((skb = skb_dequeue(&audit_queue)))
477 kauditd_hold_skb(skb);
478}
479
480/**
481 * kauditd_print_skb - Print the audit record to the ring buffer 456 * kauditd_print_skb - Print the audit record to the ring buffer
482 * @skb: audit record 457 * @skb: audit record
483 * 458 *
@@ -505,9 +480,6 @@ static void kauditd_rehold_skb(struct sk_buff *skb)
505{ 480{
506 /* put the record back in the queue at the same place */ 481 /* put the record back in the queue at the same place */
507 skb_queue_head(&audit_hold_queue, skb); 482 skb_queue_head(&audit_hold_queue, skb);
508
509 /* fail the auditd connection */
510 auditd_reset();
511} 483}
512 484
513/** 485/**
@@ -544,9 +516,6 @@ static void kauditd_hold_skb(struct sk_buff *skb)
544 /* we have no other options - drop the message */ 516 /* we have no other options - drop the message */
545 audit_log_lost("kauditd hold queue overflow"); 517 audit_log_lost("kauditd hold queue overflow");
546 kfree_skb(skb); 518 kfree_skb(skb);
547
548 /* fail the auditd connection */
549 auditd_reset();
550} 519}
551 520
552/** 521/**
@@ -567,6 +536,30 @@ static void kauditd_retry_skb(struct sk_buff *skb)
567} 536}
568 537
569/** 538/**
539 * auditd_reset - Disconnect the auditd connection
540 *
541 * Description:
542 * Break the auditd/kauditd connection and move all the queued records into the
543 * hold queue in case auditd reconnects.
544 */
545static void auditd_reset(void)
546{
547 struct sk_buff *skb;
548
549 /* if it isn't already broken, break the connection */
550 rcu_read_lock();
551 if (auditd_conn.pid)
552 auditd_set(0, 0, NULL);
553 rcu_read_unlock();
554
555 /* flush all of the main and retry queues to the hold queue */
556 while ((skb = skb_dequeue(&audit_retry_queue)))
557 kauditd_hold_skb(skb);
558 while ((skb = skb_dequeue(&audit_queue)))
559 kauditd_hold_skb(skb);
560}
561
562/**
570 * auditd_send_unicast_skb - Send a record via unicast to auditd 563 * auditd_send_unicast_skb - Send a record via unicast to auditd
571 * @skb: audit record 564 * @skb: audit record
572 * 565 *
@@ -758,6 +751,7 @@ static int kauditd_thread(void *dummy)
758 NULL, kauditd_rehold_skb); 751 NULL, kauditd_rehold_skb);
759 if (rc < 0) { 752 if (rc < 0) {
760 sk = NULL; 753 sk = NULL;
754 auditd_reset();
761 goto main_queue; 755 goto main_queue;
762 } 756 }
763 757
@@ -767,6 +761,7 @@ static int kauditd_thread(void *dummy)
767 NULL, kauditd_hold_skb); 761 NULL, kauditd_hold_skb);
768 if (rc < 0) { 762 if (rc < 0) {
769 sk = NULL; 763 sk = NULL;
764 auditd_reset();
770 goto main_queue; 765 goto main_queue;
771 } 766 }
772 767
@@ -775,16 +770,18 @@ main_queue:
775 * unicast, dump failed record sends to the retry queue; if 770 * unicast, dump failed record sends to the retry queue; if
776 * sk == NULL due to previous failures we will just do the 771 * sk == NULL due to previous failures we will just do the
777 * multicast send and move the record to the retry queue */ 772 * multicast send and move the record to the retry queue */
778 kauditd_send_queue(sk, portid, &audit_queue, 1, 773 rc = kauditd_send_queue(sk, portid, &audit_queue, 1,
779 kauditd_send_multicast_skb, 774 kauditd_send_multicast_skb,
780 kauditd_retry_skb); 775 kauditd_retry_skb);
776 if (sk == NULL || rc < 0)
777 auditd_reset();
778 sk = NULL;
781 779
782 /* drop our netns reference, no auditd sends past this line */ 780 /* drop our netns reference, no auditd sends past this line */
783 if (net) { 781 if (net) {
784 put_net(net); 782 put_net(net);
785 net = NULL; 783 net = NULL;
786 } 784 }
787 sk = NULL;
788 785
789 /* we have processed all the queues so wake everyone */ 786 /* we have processed all the queues so wake everyone */
790 wake_up(&audit_backlog_wait); 787 wake_up(&audit_backlog_wait);
diff --git a/kernel/audit.h b/kernel/audit.h
index 0f1cf6d1878a..0d87f8ab8778 100644
--- a/kernel/audit.h
+++ b/kernel/audit.h
@@ -333,13 +333,7 @@ extern u32 audit_sig_sid;
333extern int audit_filter(int msgtype, unsigned int listtype); 333extern int audit_filter(int msgtype, unsigned int listtype);
334 334
335#ifdef CONFIG_AUDITSYSCALL 335#ifdef CONFIG_AUDITSYSCALL
336extern int __audit_signal_info(int sig, struct task_struct *t); 336extern int audit_signal_info(int sig, struct task_struct *t);
337static inline int audit_signal_info(int sig, struct task_struct *t)
338{
339 if (auditd_test_task(t) || (audit_signals && !audit_dummy_context()))
340 return __audit_signal_info(sig, t);
341 return 0;
342}
343extern void audit_filter_inodes(struct task_struct *, struct audit_context *); 337extern void audit_filter_inodes(struct task_struct *, struct audit_context *);
344extern struct list_head *audit_killed_trees(void); 338extern struct list_head *audit_killed_trees(void);
345#else 339#else
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index e59ffc7fc522..1c2333155893 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -2249,26 +2249,27 @@ void __audit_ptrace(struct task_struct *t)
2249 * If the audit subsystem is being terminated, record the task (pid) 2249 * If the audit subsystem is being terminated, record the task (pid)
2250 * and uid that is doing that. 2250 * and uid that is doing that.
2251 */ 2251 */
2252int __audit_signal_info(int sig, struct task_struct *t) 2252int audit_signal_info(int sig, struct task_struct *t)
2253{ 2253{
2254 struct audit_aux_data_pids *axp; 2254 struct audit_aux_data_pids *axp;
2255 struct task_struct *tsk = current; 2255 struct task_struct *tsk = current;
2256 struct audit_context *ctx = tsk->audit_context; 2256 struct audit_context *ctx = tsk->audit_context;
2257 kuid_t uid = current_uid(), t_uid = task_uid(t); 2257 kuid_t uid = current_uid(), t_uid = task_uid(t);
2258 2258
2259 if (auditd_test_task(t)) { 2259 if (auditd_test_task(t) &&
2260 if (sig == SIGTERM || sig == SIGHUP || sig == SIGUSR1 || sig == SIGUSR2) { 2260 (sig == SIGTERM || sig == SIGHUP ||
2261 audit_sig_pid = task_tgid_nr(tsk); 2261 sig == SIGUSR1 || sig == SIGUSR2)) {
2262 if (uid_valid(tsk->loginuid)) 2262 audit_sig_pid = task_tgid_nr(tsk);
2263 audit_sig_uid = tsk->loginuid; 2263 if (uid_valid(tsk->loginuid))
2264 else 2264 audit_sig_uid = tsk->loginuid;
2265 audit_sig_uid = uid; 2265 else
2266 security_task_getsecid(tsk, &audit_sig_sid); 2266 audit_sig_uid = uid;
2267 } 2267 security_task_getsecid(tsk, &audit_sig_sid);
2268 if (!audit_signals || audit_dummy_context())
2269 return 0;
2270 } 2268 }
2271 2269
2270 if (!audit_signals || audit_dummy_context())
2271 return 0;
2272
2272 /* optimize the common case by putting first signal recipient directly 2273 /* optimize the common case by putting first signal recipient directly
2273 * in audit_context */ 2274 * in audit_context */
2274 if (!ctx->target_pid) { 2275 if (!ctx->target_pid) {
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index f45827e205d3..b4f1cb0c5ac7 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -1162,12 +1162,12 @@ out:
1162 LD_ABS_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + imm32)) */ 1162 LD_ABS_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + imm32)) */
1163 off = IMM; 1163 off = IMM;
1164load_word: 1164load_word:
 1165 /* BPF_LD + BPF_ABS and BPF_LD + BPF_IND insns are 1165 /* BPF_LD + BPF_ABS and BPF_LD + BPF_IND insns are only
1166 * only appearing in the programs where ctx == 1166 * appearing in the programs where ctx == skb
1167 * skb. All programs keep 'ctx' in regs[BPF_REG_CTX] 1167 * (see may_access_skb() in the verifier). All programs
1168 * == BPF_R6, bpf_convert_filter() saves it in BPF_R6, 1168 * keep 'ctx' in regs[BPF_REG_CTX] == BPF_R6,
1169 * internal BPF verifier will check that BPF_R6 == 1169 * bpf_convert_filter() saves it in BPF_R6, internal BPF
1170 * ctx. 1170 * verifier will check that BPF_R6 == ctx.
1171 * 1171 *
1172 * BPF_ABS and BPF_IND are wrappers of function calls, 1172 * BPF_ABS and BPF_IND are wrappers of function calls,
1173 * so they scratch BPF_R1-BPF_R5 registers, preserve 1173 * so they scratch BPF_R1-BPF_R5 registers, preserve
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 796b68d00119..a834068a400e 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -765,38 +765,56 @@ static bool is_pointer_value(struct bpf_verifier_env *env, int regno)
765 } 765 }
766} 766}
767 767
768static int check_ptr_alignment(struct bpf_verifier_env *env, 768static int check_pkt_ptr_alignment(const struct bpf_reg_state *reg,
769 struct bpf_reg_state *reg, int off, int size) 769 int off, int size)
770{ 770{
771 if (reg->type != PTR_TO_PACKET && reg->type != PTR_TO_MAP_VALUE_ADJ) {
772 if (off % size != 0) {
773 verbose("misaligned access off %d size %d\n",
774 off, size);
775 return -EACCES;
776 } else {
777 return 0;
778 }
779 }
780
781 if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
782 /* misaligned access to packet is ok on x86,arm,arm64 */
783 return 0;
784
785 if (reg->id && size != 1) { 771 if (reg->id && size != 1) {
786 verbose("Unknown packet alignment. Only byte-sized access allowed\n"); 772 verbose("Unknown alignment. Only byte-sized access allowed in packet access.\n");
787 return -EACCES; 773 return -EACCES;
788 } 774 }
789 775
790 /* skb->data is NET_IP_ALIGN-ed */ 776 /* skb->data is NET_IP_ALIGN-ed */
791 if (reg->type == PTR_TO_PACKET && 777 if ((NET_IP_ALIGN + reg->off + off) % size != 0) {
792 (NET_IP_ALIGN + reg->off + off) % size != 0) {
793 verbose("misaligned packet access off %d+%d+%d size %d\n", 778 verbose("misaligned packet access off %d+%d+%d size %d\n",
794 NET_IP_ALIGN, reg->off, off, size); 779 NET_IP_ALIGN, reg->off, off, size);
795 return -EACCES; 780 return -EACCES;
796 } 781 }
782
797 return 0; 783 return 0;
798} 784}
799 785
786static int check_val_ptr_alignment(const struct bpf_reg_state *reg,
787 int size)
788{
789 if (size != 1) {
790 verbose("Unknown alignment. Only byte-sized access allowed in value access.\n");
791 return -EACCES;
792 }
793
794 return 0;
795}
796
797static int check_ptr_alignment(const struct bpf_reg_state *reg,
798 int off, int size)
799{
800 switch (reg->type) {
801 case PTR_TO_PACKET:
802 return IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ? 0 :
803 check_pkt_ptr_alignment(reg, off, size);
804 case PTR_TO_MAP_VALUE_ADJ:
805 return IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ? 0 :
806 check_val_ptr_alignment(reg, size);
807 default:
808 if (off % size != 0) {
809 verbose("misaligned access off %d size %d\n",
810 off, size);
811 return -EACCES;
812 }
813
814 return 0;
815 }
816}
817
800/* check whether memory at (regno + off) is accessible for t = (read | write) 818/* check whether memory at (regno + off) is accessible for t = (read | write)
801 * if t==write, value_regno is a register which value is stored into memory 819 * if t==write, value_regno is a register which value is stored into memory
802 * if t==read, value_regno is a register which will receive the value from memory 820 * if t==read, value_regno is a register which will receive the value from memory
@@ -818,7 +836,7 @@ static int check_mem_access(struct bpf_verifier_env *env, u32 regno, int off,
818 if (size < 0) 836 if (size < 0)
819 return size; 837 return size;
820 838
821 err = check_ptr_alignment(env, reg, off, size); 839 err = check_ptr_alignment(reg, off, size);
822 if (err) 840 if (err)
823 return err; 841 return err;
824 842
@@ -1925,6 +1943,7 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
1925 * register as unknown. 1943 * register as unknown.
1926 */ 1944 */
1927 if (env->allow_ptr_leaks && 1945 if (env->allow_ptr_leaks &&
1946 BPF_CLASS(insn->code) == BPF_ALU64 && opcode == BPF_ADD &&
1928 (dst_reg->type == PTR_TO_MAP_VALUE || 1947 (dst_reg->type == PTR_TO_MAP_VALUE ||
1929 dst_reg->type == PTR_TO_MAP_VALUE_ADJ)) 1948 dst_reg->type == PTR_TO_MAP_VALUE_ADJ))
1930 dst_reg->type = PTR_TO_MAP_VALUE_ADJ; 1949 dst_reg->type = PTR_TO_MAP_VALUE_ADJ;
@@ -1973,14 +1992,15 @@ static void find_good_pkt_pointers(struct bpf_verifier_state *state,
1973 1992
1974 for (i = 0; i < MAX_BPF_REG; i++) 1993 for (i = 0; i < MAX_BPF_REG; i++)
1975 if (regs[i].type == PTR_TO_PACKET && regs[i].id == dst_reg->id) 1994 if (regs[i].type == PTR_TO_PACKET && regs[i].id == dst_reg->id)
1976 regs[i].range = dst_reg->off; 1995 /* keep the maximum range already checked */
1996 regs[i].range = max(regs[i].range, dst_reg->off);
1977 1997
1978 for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) { 1998 for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) {
1979 if (state->stack_slot_type[i] != STACK_SPILL) 1999 if (state->stack_slot_type[i] != STACK_SPILL)
1980 continue; 2000 continue;
1981 reg = &state->spilled_regs[i / BPF_REG_SIZE]; 2001 reg = &state->spilled_regs[i / BPF_REG_SIZE];
1982 if (reg->type == PTR_TO_PACKET && reg->id == dst_reg->id) 2002 if (reg->type == PTR_TO_PACKET && reg->id == dst_reg->id)
1983 reg->range = dst_reg->off; 2003 reg->range = max(reg->range, dst_reg->off);
1984 } 2004 }
1985} 2005}
1986 2006
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index 48851327a15e..687f5e0194ef 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -2425,11 +2425,12 @@ ssize_t __cgroup_procs_write(struct kernfs_open_file *of, char *buf,
2425 tsk = tsk->group_leader; 2425 tsk = tsk->group_leader;
2426 2426
2427 /* 2427 /*
2428 * Workqueue threads may acquire PF_NO_SETAFFINITY and become 2428 * kthreads may acquire PF_NO_SETAFFINITY during initialization.
2429 * trapped in a cpuset, or RT worker may be born in a cgroup 2429 * If userland migrates such a kthread to a non-root cgroup, it can
2430 * with no rt_runtime allocated. Just say no. 2430 * become trapped in a cpuset, or RT kthread may be born in a
2431 * cgroup with no rt_runtime allocated. Just say no.
2431 */ 2432 */
2432 if (tsk == kthreadd_task || (tsk->flags & PF_NO_SETAFFINITY)) { 2433 if (tsk->no_cgroup_migration || (tsk->flags & PF_NO_SETAFFINITY)) {
2433 ret = -EINVAL; 2434 ret = -EINVAL;
2434 goto out_unlock_rcu; 2435 goto out_unlock_rcu;
2435 } 2436 }
diff --git a/kernel/irq/affinity.c b/kernel/irq/affinity.c
index 4544b115f5eb..d052947fe785 100644
--- a/kernel/irq/affinity.c
+++ b/kernel/irq/affinity.c
@@ -59,7 +59,7 @@ static int get_nodes_in_cpumask(const struct cpumask *mask, nodemask_t *nodemsk)
59struct cpumask * 59struct cpumask *
60irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd) 60irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
61{ 61{
62 int n, nodes, vecs_per_node, cpus_per_vec, extra_vecs, curvec; 62 int n, nodes, cpus_per_vec, extra_vecs, curvec;
63 int affv = nvecs - affd->pre_vectors - affd->post_vectors; 63 int affv = nvecs - affd->pre_vectors - affd->post_vectors;
64 int last_affv = affv + affd->pre_vectors; 64 int last_affv = affv + affd->pre_vectors;
65 nodemask_t nodemsk = NODE_MASK_NONE; 65 nodemask_t nodemsk = NODE_MASK_NONE;
@@ -94,19 +94,21 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
94 goto done; 94 goto done;
95 } 95 }
96 96
97 /* Spread the vectors per node */
98 vecs_per_node = affv / nodes;
99 /* Account for rounding errors */
100 extra_vecs = affv - (nodes * vecs_per_node);
101
102 for_each_node_mask(n, nodemsk) { 97 for_each_node_mask(n, nodemsk) {
103 int ncpus, v, vecs_to_assign = vecs_per_node; 98 int ncpus, v, vecs_to_assign, vecs_per_node;
99
100 /* Spread the vectors per node */
101 vecs_per_node = (affv - curvec) / nodes;
104 102
105 /* Get the cpus on this node which are in the mask */ 103 /* Get the cpus on this node which are in the mask */
106 cpumask_and(nmsk, cpu_online_mask, cpumask_of_node(n)); 104 cpumask_and(nmsk, cpu_online_mask, cpumask_of_node(n));
107 105
108 /* Calculate the number of cpus per vector */ 106 /* Calculate the number of cpus per vector */
109 ncpus = cpumask_weight(nmsk); 107 ncpus = cpumask_weight(nmsk);
108 vecs_to_assign = min(vecs_per_node, ncpus);
109
110 /* Account for rounding errors */
111 extra_vecs = ncpus - vecs_to_assign * (ncpus / vecs_to_assign);
110 112
111 for (v = 0; curvec < last_affv && v < vecs_to_assign; 113 for (v = 0; curvec < last_affv && v < vecs_to_assign;
112 curvec++, v++) { 114 curvec++, v++) {
@@ -115,14 +117,14 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
115 /* Account for extra vectors to compensate rounding errors */ 117 /* Account for extra vectors to compensate rounding errors */
116 if (extra_vecs) { 118 if (extra_vecs) {
117 cpus_per_vec++; 119 cpus_per_vec++;
118 if (!--extra_vecs) 120 --extra_vecs;
119 vecs_per_node++;
120 } 121 }
121 irq_spread_init_one(masks + curvec, nmsk, cpus_per_vec); 122 irq_spread_init_one(masks + curvec, nmsk, cpus_per_vec);
122 } 123 }
123 124
124 if (curvec >= last_affv) 125 if (curvec >= last_affv)
125 break; 126 break;
127 --nodes;
126 } 128 }
127 129
128done: 130done:
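The rewritten loop recomputes vecs_per_node from whatever is still unassigned as each node is consumed, so a small node can no longer strand vectors. A toy userspace model with made-up numbers (8 vectors over nodes with 2 and 6 online CPUs):

int affv = 8, nodes = 2, cpus[] = { 2, 6 }, curvec = 0;

for (int n = 0; n < 2; n++) {
	int vecs_per_node = (affv - curvec) / nodes;	/* 4, then 6 */
	int vecs = vecs_per_node < cpus[n] ? vecs_per_node : cpus[n];

	curvec += vecs;		/* node 0 takes 2, node 1 takes all 6 left */
	nodes--;
}
/* the old fixed split handed node 0 four vectors for its two CPUs */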
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 2f26adea0f84..26db528c1d88 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -20,6 +20,7 @@
20#include <linux/freezer.h> 20#include <linux/freezer.h>
21#include <linux/ptrace.h> 21#include <linux/ptrace.h>
22#include <linux/uaccess.h> 22#include <linux/uaccess.h>
23#include <linux/cgroup.h>
23#include <trace/events/sched.h> 24#include <trace/events/sched.h>
24 25
25static DEFINE_SPINLOCK(kthread_create_lock); 26static DEFINE_SPINLOCK(kthread_create_lock);
@@ -225,6 +226,7 @@ static int kthread(void *_create)
225 226
226 ret = -EINTR; 227 ret = -EINTR;
227 if (!test_bit(KTHREAD_SHOULD_STOP, &self->flags)) { 228 if (!test_bit(KTHREAD_SHOULD_STOP, &self->flags)) {
229 cgroup_kthread_ready();
228 __kthread_parkme(self); 230 __kthread_parkme(self);
229 ret = threadfn(data); 231 ret = threadfn(data);
230 } 232 }
@@ -538,6 +540,7 @@ int kthreadd(void *unused)
538 set_mems_allowed(node_states[N_MEMORY]); 540 set_mems_allowed(node_states[N_MEMORY]);
539 541
540 current->flags |= PF_NOFREEZE; 542 current->flags |= PF_NOFREEZE;
543 cgroup_init_kthreadd();
541 544
542 for (;;) { 545 for (;;) {
543 set_current_state(TASK_INTERRUPTIBLE); 546 set_current_state(TASK_INTERRUPTIBLE);
diff --git a/kernel/padata.c b/kernel/padata.c
index 05316c9f32da..3202aa17492c 100644
--- a/kernel/padata.c
+++ b/kernel/padata.c
@@ -186,19 +186,20 @@ static struct padata_priv *padata_get_next(struct parallel_data *pd)
186 186
187 reorder = &next_queue->reorder; 187 reorder = &next_queue->reorder;
188 188
189 spin_lock(&reorder->lock);
189 if (!list_empty(&reorder->list)) { 190 if (!list_empty(&reorder->list)) {
190 padata = list_entry(reorder->list.next, 191 padata = list_entry(reorder->list.next,
191 struct padata_priv, list); 192 struct padata_priv, list);
192 193
193 spin_lock(&reorder->lock);
194 list_del_init(&padata->list); 194 list_del_init(&padata->list);
195 atomic_dec(&pd->reorder_objects); 195 atomic_dec(&pd->reorder_objects);
196 spin_unlock(&reorder->lock);
197 196
198 pd->processed++; 197 pd->processed++;
199 198
199 spin_unlock(&reorder->lock);
200 goto out; 200 goto out;
201 } 201 }
202 spin_unlock(&reorder->lock);
202 203
203 if (__this_cpu_read(pd->pqueue->cpu_index) == next_queue->cpu_index) { 204 if (__this_cpu_read(pd->pqueue->cpu_index) == next_queue->cpu_index) {
204 padata = ERR_PTR(-ENODATA); 205 padata = ERR_PTR(-ENODATA);
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 0af928712174..266ddcc1d8bb 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -184,11 +184,17 @@ static void ptrace_unfreeze_traced(struct task_struct *task)
184 184
185 WARN_ON(!task->ptrace || task->parent != current); 185 WARN_ON(!task->ptrace || task->parent != current);
186 186
187 /*
188 * PTRACE_LISTEN can allow ptrace_trap_notify to wake us up remotely.
189 * Recheck state under the lock to close this race.
190 */
187 spin_lock_irq(&task->sighand->siglock); 191 spin_lock_irq(&task->sighand->siglock);
188 if (__fatal_signal_pending(task)) 192 if (task->state == __TASK_TRACED) {
189 wake_up_state(task, __TASK_TRACED); 193 if (__fatal_signal_pending(task))
190 else 194 wake_up_state(task, __TASK_TRACED);
191 task->state = TASK_TRACED; 195 else
196 task->state = TASK_TRACED;
197 }
192 spin_unlock_irq(&task->sighand->siglock); 198 spin_unlock_irq(&task->sighand->siglock);
193} 199}
194 200
diff --git a/kernel/sched/clock.c b/kernel/sched/clock.c
index a08795e21628..00a45c45beca 100644
--- a/kernel/sched/clock.c
+++ b/kernel/sched/clock.c
@@ -96,10 +96,10 @@ static DEFINE_STATIC_KEY_FALSE(__sched_clock_stable);
 static int __sched_clock_stable_early = 1;
 
 /*
- * We want: ktime_get_ns() + gtod_offset == sched_clock() + raw_offset
+ * We want: ktime_get_ns() + __gtod_offset == sched_clock() + __sched_clock_offset
  */
-static __read_mostly u64 raw_offset;
-static __read_mostly u64 gtod_offset;
+__read_mostly u64 __sched_clock_offset;
+static __read_mostly u64 __gtod_offset;
 
 struct sched_clock_data {
 	u64			tick_raw;
@@ -131,17 +131,24 @@ static void __set_sched_clock_stable(void)
 	/*
 	 * Attempt to make the (initial) unstable->stable transition continuous.
 	 */
-	raw_offset = (scd->tick_gtod + gtod_offset) - (scd->tick_raw);
+	__sched_clock_offset = (scd->tick_gtod + __gtod_offset) - (scd->tick_raw);
 
 	printk(KERN_INFO "sched_clock: Marking stable (%lld, %lld)->(%lld, %lld)\n",
-			scd->tick_gtod, gtod_offset,
-			scd->tick_raw, raw_offset);
+			scd->tick_gtod, __gtod_offset,
+			scd->tick_raw, __sched_clock_offset);
 
 	static_branch_enable(&__sched_clock_stable);
 	tick_dep_clear(TICK_DEP_BIT_CLOCK_UNSTABLE);
 }
 
-static void __clear_sched_clock_stable(struct work_struct *work)
+static void __sched_clock_work(struct work_struct *work)
+{
+	static_branch_disable(&__sched_clock_stable);
+}
+
+static DECLARE_WORK(sched_clock_work, __sched_clock_work);
+
+static void __clear_sched_clock_stable(void)
 {
 	struct sched_clock_data *scd = this_scd();
 
@@ -154,17 +161,17 @@ static void __clear_sched_clock_stable(struct work_struct *work)
 	 *
 	 * Still do what we can.
 	 */
-	gtod_offset = (scd->tick_raw + raw_offset) - (scd->tick_gtod);
+	__gtod_offset = (scd->tick_raw + __sched_clock_offset) - (scd->tick_gtod);
 
 	printk(KERN_INFO "sched_clock: Marking unstable (%lld, %lld)<-(%lld, %lld)\n",
-			scd->tick_gtod, gtod_offset,
-			scd->tick_raw, raw_offset);
+			scd->tick_gtod, __gtod_offset,
+			scd->tick_raw, __sched_clock_offset);
 
-	static_branch_disable(&__sched_clock_stable);
 	tick_dep_set(TICK_DEP_BIT_CLOCK_UNSTABLE);
-}
 
-static DECLARE_WORK(sched_clock_work, __clear_sched_clock_stable);
+	if (sched_clock_stable())
+		schedule_work(&sched_clock_work);
+}
 
 void clear_sched_clock_stable(void)
 {
@@ -173,7 +180,7 @@ void clear_sched_clock_stable(void)
 	smp_mb(); /* matches sched_clock_init_late() */
 
 	if (sched_clock_running == 2)
-		schedule_work(&sched_clock_work);
+		__clear_sched_clock_stable();
 }
 
 void sched_clock_init_late(void)
@@ -214,7 +221,7 @@ static inline u64 wrap_max(u64 x, u64 y)
  */
 static u64 sched_clock_local(struct sched_clock_data *scd)
 {
-	u64 now, clock, old_clock, min_clock, max_clock;
+	u64 now, clock, old_clock, min_clock, max_clock, gtod;
 	s64 delta;
 
 again:
@@ -231,9 +238,10 @@ again:
 	 *		      scd->tick_gtod + TICK_NSEC);
 	 */
 
-	clock = scd->tick_gtod + gtod_offset + delta;
-	min_clock = wrap_max(scd->tick_gtod, old_clock);
-	max_clock = wrap_max(old_clock, scd->tick_gtod + TICK_NSEC);
+	gtod = scd->tick_gtod + __gtod_offset;
+	clock = gtod + delta;
+	min_clock = wrap_max(gtod, old_clock);
+	max_clock = wrap_max(old_clock, gtod + TICK_NSEC);
 
 	clock = wrap_max(clock, min_clock);
 	clock = wrap_min(clock, max_clock);
@@ -317,7 +325,7 @@ u64 sched_clock_cpu(int cpu)
 	u64 clock;
 
 	if (sched_clock_stable())
-		return sched_clock() + raw_offset;
+		return sched_clock() + __sched_clock_offset;
 
 	if (unlikely(!sched_clock_running))
 		return 0ull;
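The renamed offsets preserve the invariant stated in the comment, ktime_get_ns() + __gtod_offset == sched_clock() + __sched_clock_offset, so switching between the stable and unstable paths never makes the clock jump. A user-space sketch of the same splice arithmetic (illustrative only, assumed names):

#include <stdint.h>

/* When handing off from clock A to clock B, pick an offset such that
 * B + offset resumes exactly where A stopped; this mirrors
 * __sched_clock_offset = (tick_gtod + __gtod_offset) - tick_raw above. */
static uint64_t splice_offset(uint64_t a_now, uint64_t b_now)
{
	return a_now - b_now;
}

static uint64_t read_spliced(uint64_t b_now, uint64_t offset)
{
	return b_now + offset;	/* continuous across the transition */
}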
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index acf0a5a06da7..8c8714fcb53c 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -2133,9 +2133,12 @@ static int do_proc_douintvec_conv(bool *negp, unsigned long *lvalp,
 	if (write) {
 		if (*negp)
 			return -EINVAL;
+		if (*lvalp > UINT_MAX)
+			return -EINVAL;
 		*valp = *lvalp;
 	} else {
 		unsigned int val = *valp;
+		*negp = false;
 		*lvalp = (unsigned long)val;
 	}
 	return 0;
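The added check matters on 64-bit kernels, where *lvalp is a 64-bit unsigned long but *valp is a 32-bit unsigned int: without it, an oversized write would be silently truncated. A stand-alone sketch of the conversion rule (hypothetical helper name):

#include <stdbool.h>
#include <errno.h>
#include <limits.h>

static int uint_conv(bool neg, unsigned long lval, unsigned int *val)
{
	if (neg)
		return -EINVAL;		/* negative input is rejected */
	if (lval > UINT_MAX)
		return -EINVAL;		/* would truncate on 64-bit */
	*val = (unsigned int)lval;
	return 0;
}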
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index b9691ee8f6c1..27bb2e61276e 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -3755,23 +3755,24 @@ static void __enable_ftrace_function_probe(struct ftrace_ops_hash *old_hash)
 	ftrace_probe_registered = 1;
 }
 
-static void __disable_ftrace_function_probe(void)
+static bool __disable_ftrace_function_probe(void)
 {
 	int i;
 
 	if (!ftrace_probe_registered)
-		return;
+		return false;
 
 	for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
 		struct hlist_head *hhd = &ftrace_func_hash[i];
 		if (hhd->first)
-			return;
+			return false;
 	}
 
 	/* no more funcs left */
 	ftrace_shutdown(&trace_probe_ops, 0);
 
 	ftrace_probe_registered = 0;
+	return true;
 }
 
 
@@ -3901,6 +3902,7 @@ static void
 __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
 				  void *data, int flags)
 {
+	struct ftrace_ops_hash old_hash_ops;
 	struct ftrace_func_entry *rec_entry;
 	struct ftrace_func_probe *entry;
 	struct ftrace_func_probe *p;
@@ -3912,6 +3914,7 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
 	struct hlist_node *tmp;
 	char str[KSYM_SYMBOL_LEN];
 	int i, ret;
+	bool disabled;
 
 	if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
 		func_g.search = NULL;
@@ -3930,6 +3933,10 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
 
 	mutex_lock(&trace_probe_ops.func_hash->regex_lock);
 
+	old_hash_ops.filter_hash = old_hash;
+	/* Probes only have filters */
+	old_hash_ops.notrace_hash = NULL;
+
 	hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
 	if (!hash)
 		/* Hmm, should report this somehow */
@@ -3967,12 +3974,17 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
 		}
 	}
 	mutex_lock(&ftrace_lock);
-	__disable_ftrace_function_probe();
+	disabled = __disable_ftrace_function_probe();
 	/*
 	 * Remove after the disable is called. Otherwise, if the last
 	 * probe is removed, a null hash means *all enabled*.
 	 */
 	ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash);
+
+	/* still need to update the function call sites */
+	if (ftrace_enabled && !disabled)
+		ftrace_run_modify_code(&trace_probe_ops, FTRACE_UPDATE_CALLS,
+				       &old_hash_ops);
 	synchronize_sched();
 	if (!ret)
 		free_ftrace_hash_rcu(old_hash);
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 96fc3c043ad6..54e7a90db848 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -4826,9 +4826,9 @@ static __init int test_ringbuffer(void)
 		rb_data[cpu].cnt = cpu;
 		rb_threads[cpu] = kthread_create(rb_test, &rb_data[cpu],
 						 "rbtester/%d", cpu);
-		if (WARN_ON(!rb_threads[cpu])) {
+		if (WARN_ON(IS_ERR(rb_threads[cpu]))) {
 			pr_cont("FAILED\n");
-			ret = -1;
+			ret = PTR_ERR(rb_threads[cpu]);
 			goto out_free;
 		}
 
@@ -4838,9 +4838,9 @@ static __init int test_ringbuffer(void)
 
 	/* Now create the rb hammer! */
 	rb_hammer = kthread_run(rb_hammer_test, NULL, "rbhammer");
-	if (WARN_ON(!rb_hammer)) {
+	if (WARN_ON(IS_ERR(rb_hammer))) {
 		pr_cont("FAILED\n");
-		ret = -1;
+		ret = PTR_ERR(rb_hammer);
 		goto out_free;
 	}
 
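Both hunks fix the same misconception: kthread_create() and kthread_run() report failure as an ERR_PTR()-encoded pointer, never as NULL, so the old !ptr checks could not fire. The corrected pattern, sketched in isolation:

#include <linux/kthread.h>
#include <linux/err.h>

static int start_worker(int (*fn)(void *), void *data)
{
	struct task_struct *t = kthread_run(fn, data, "worker");

	if (IS_ERR(t))			/* a NULL test would miss this */
		return PTR_ERR(t);	/* propagate the real errno */
	return 0;
}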
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index e68604ae3ced..60abc44385b7 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -786,6 +786,68 @@ void iov_iter_advance(struct iov_iter *i, size_t size)
 }
 EXPORT_SYMBOL(iov_iter_advance);
 
+void iov_iter_revert(struct iov_iter *i, size_t unroll)
+{
+	if (!unroll)
+		return;
+	i->count += unroll;
+	if (unlikely(i->type & ITER_PIPE)) {
+		struct pipe_inode_info *pipe = i->pipe;
+		int idx = i->idx;
+		size_t off = i->iov_offset;
+		while (1) {
+			size_t n = off - pipe->bufs[idx].offset;
+			if (unroll < n) {
+				off -= (n - unroll);
+				break;
+			}
+			unroll -= n;
+			if (!unroll && idx == i->start_idx) {
+				off = 0;
+				break;
+			}
+			if (!idx--)
+				idx = pipe->buffers - 1;
+			off = pipe->bufs[idx].offset + pipe->bufs[idx].len;
+		}
+		i->iov_offset = off;
+		i->idx = idx;
+		pipe_truncate(i);
+		return;
+	}
+	if (unroll <= i->iov_offset) {
+		i->iov_offset -= unroll;
+		return;
+	}
+	unroll -= i->iov_offset;
+	if (i->type & ITER_BVEC) {
+		const struct bio_vec *bvec = i->bvec;
+		while (1) {
+			size_t n = (--bvec)->bv_len;
+			i->nr_segs++;
+			if (unroll <= n) {
+				i->bvec = bvec;
+				i->iov_offset = n - unroll;
+				return;
+			}
+			unroll -= n;
+		}
+	} else { /* same logics for iovec and kvec */
+		const struct iovec *iov = i->iov;
+		while (1) {
+			size_t n = (--iov)->iov_len;
+			i->nr_segs++;
+			if (unroll <= n) {
+				i->iov = iov;
+				i->iov_offset = n - unroll;
+				return;
+			}
+			unroll -= n;
+		}
+	}
+}
+EXPORT_SYMBOL(iov_iter_revert);
+
 /*
  * Return the count of just the current iov_iter segment.
  */
@@ -839,6 +901,7 @@ void iov_iter_pipe(struct iov_iter *i, int direction,
 	i->idx = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
 	i->iov_offset = 0;
 	i->count = count;
+	i->start_idx = i->idx;
 }
 EXPORT_SYMBOL(iov_iter_pipe);
 
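iov_iter_revert() exists so callers can unwind a partially consumed iterator after a mid-operation failure; start_idx is recorded in iov_iter_pipe() so pipe-backed iterators can be rewound too. A sketch of the intended calling pattern (hypothetical caller, not from this patch):

#include <linux/uio.h>

static int copy_two_chunks(struct iov_iter *to, void *a, void *b, size_t n)
{
	size_t done = copy_to_iter(a, n, to);

	if (done != n)
		goto revert;
	done += copy_to_iter(b, n, to);
	if (done != 2 * n)
		goto revert;
	return 0;
revert:
	/* rewind exactly what we consumed; the iterator looks untouched */
	iov_iter_revert(to, done);
	return -EFAULT;
}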
diff --git a/lib/syscall.c b/lib/syscall.c
index 17d5ff5fa6a3..2c6cd1b5c3ea 100644
--- a/lib/syscall.c
+++ b/lib/syscall.c
@@ -12,6 +12,7 @@ static int collect_syscall(struct task_struct *target, long *callno,
 
 	if (!try_get_task_stack(target)) {
 		/* Task has no stack, so the task isn't in a syscall. */
+		*sp = *pc = 0;
 		*callno = -1;
 		return 0;
 	}
diff --git a/lib/test_kasan.c b/lib/test_kasan.c
index 0b1d3140fbb8..a25c9763fce1 100644
--- a/lib/test_kasan.c
+++ b/lib/test_kasan.c
@@ -20,6 +20,7 @@
 #include <linux/string.h>
 #include <linux/uaccess.h>
 #include <linux/module.h>
+#include <linux/kasan.h>
 
 /*
  * Note: test functions are marked noinline so that their names appear in
@@ -474,6 +475,12 @@ static noinline void __init use_after_scope_test(void)
 
 static int __init kmalloc_tests_init(void)
 {
+	/*
+	 * Temporarily enable multi-shot mode. Otherwise, we'd only get a
+	 * report for the first case.
+	 */
+	bool multishot = kasan_save_enable_multi_shot();
+
 	kmalloc_oob_right();
 	kmalloc_oob_left();
 	kmalloc_node_oob_right();
@@ -499,6 +506,9 @@ static int __init kmalloc_tests_init(void)
 	ksize_unpoisons_memory();
 	copy_user_test();
 	use_after_scope_test();
+
+	kasan_restore_multi_shot(multishot);
+
 	return -EAGAIN;
 }
 
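By default KASAN reports only the first error and suppresses the rest; the save/restore pair flips the new multi-shot bit for the duration of the tests and then puts back whatever state the caller found. The pattern nests safely because the save call returns the previous state:

#include <linux/kasan.h>

static void run_noisy_tests(void (*tests)(void))
{
	bool old = kasan_save_enable_multi_shot();

	tests();			/* may legitimately trigger many reports */
	kasan_restore_multi_shot(old);	/* restore, don't force-disable */
}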
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 1ebc93e179f3..f3c4f9d22821 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -240,18 +240,18 @@ static ssize_t defrag_store(struct kobject *kobj,
 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
 		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
-	} else if (!memcmp("defer", buf,
-		    min(sizeof("defer")-1, count))) {
-		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
-		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
-		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
-		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
 	} else if (!memcmp("defer+madvise", buf,
 		    min(sizeof("defer+madvise")-1, count))) {
 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
 		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
+	} else if (!memcmp("defer", buf,
+		    min(sizeof("defer")-1, count))) {
+		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
+		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
+		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
+		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
 	} else if (!memcmp("madvise", buf,
 		    min(sizeof("madvise")-1, count))) {
 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
@@ -1568,8 +1568,7 @@ bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 		deactivate_page(page);
 
 	if (pmd_young(orig_pmd) || pmd_dirty(orig_pmd)) {
-		orig_pmd = pmdp_huge_get_and_clear_full(tlb->mm, addr, pmd,
-			tlb->fullmm);
+		pmdp_invalidate(vma, addr, pmd);
 		orig_pmd = pmd_mkold(orig_pmd);
 		orig_pmd = pmd_mkclean(orig_pmd);
 
@@ -1724,37 +1723,69 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 {
 	struct mm_struct *mm = vma->vm_mm;
 	spinlock_t *ptl;
-	int ret = 0;
+	pmd_t entry;
+	bool preserve_write;
+	int ret;
 
 	ptl = __pmd_trans_huge_lock(pmd, vma);
-	if (ptl) {
-		pmd_t entry;
-		bool preserve_write = prot_numa && pmd_write(*pmd);
-		ret = 1;
+	if (!ptl)
+		return 0;
 
-		/*
-		 * Avoid trapping faults against the zero page. The read-only
-		 * data is likely to be read-cached on the local CPU and
-		 * local/remote hits to the zero page are not interesting.
-		 */
-		if (prot_numa && is_huge_zero_pmd(*pmd)) {
-			spin_unlock(ptl);
-			return ret;
-		}
+	preserve_write = prot_numa && pmd_write(*pmd);
+	ret = 1;
 
-		if (!prot_numa || !pmd_protnone(*pmd)) {
-			entry = pmdp_huge_get_and_clear_notify(mm, addr, pmd);
-			entry = pmd_modify(entry, newprot);
-			if (preserve_write)
-				entry = pmd_mk_savedwrite(entry);
-			ret = HPAGE_PMD_NR;
-			set_pmd_at(mm, addr, pmd, entry);
-			BUG_ON(vma_is_anonymous(vma) && !preserve_write &&
-					pmd_write(entry));
-		}
-		spin_unlock(ptl);
-	}
+	/*
+	 * Avoid trapping faults against the zero page. The read-only
+	 * data is likely to be read-cached on the local CPU and
+	 * local/remote hits to the zero page are not interesting.
+	 */
+	if (prot_numa && is_huge_zero_pmd(*pmd))
+		goto unlock;
+
+	if (prot_numa && pmd_protnone(*pmd))
+		goto unlock;
+
+	/*
+	 * In case prot_numa, we are under down_read(mmap_sem). It's critical
+	 * to not clear pmd intermittently to avoid race with MADV_DONTNEED
+	 * which is also under down_read(mmap_sem):
+	 *
+	 *	CPU0:				CPU1:
+	 *				change_huge_pmd(prot_numa=1)
+	 *				 pmdp_huge_get_and_clear_notify()
+	 * madvise_dontneed()
+	 *  zap_pmd_range()
+	 *   pmd_trans_huge(*pmd) == 0 (without ptl)
+	 *   // skip the pmd
+	 *				 set_pmd_at();
+	 *				 // pmd is re-established
+	 *
+	 * The race makes MADV_DONTNEED miss the huge pmd and don't clear it
+	 * which may break userspace.
+	 *
+	 * pmdp_invalidate() is required to make sure we don't miss
+	 * dirty/young flags set by hardware.
+	 */
+	entry = *pmd;
+	pmdp_invalidate(vma, addr, pmd);
 
+	/*
+	 * Recover dirty/young flags. It relies on pmdp_invalidate to not
+	 * corrupt them.
+	 */
+	if (pmd_dirty(*pmd))
+		entry = pmd_mkdirty(entry);
+	if (pmd_young(*pmd))
+		entry = pmd_mkyoung(entry);
+
+	entry = pmd_modify(entry, newprot);
+	if (preserve_write)
+		entry = pmd_mk_savedwrite(entry);
+	ret = HPAGE_PMD_NR;
+	set_pmd_at(mm, addr, pmd, entry);
+	BUG_ON(vma_is_anonymous(vma) && !preserve_write && pmd_write(entry));
+unlock:
+	spin_unlock(ptl);
 	return ret;
 }
 
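The defrag_store() hunk is an ordering fix for prefix-limited matching: because the compares are capped at the keyword length, "defer" also matches a buffer holding "defer+madvise", so the longer keyword has to be tested first. A minimal stand-alone illustration (assumed helper name):

#include <string.h>

static int matches(const char *buf, size_t count, const char *kw)
{
	size_t n = strlen(kw);

	/* prefix-limited compare, as in defrag_store() above */
	return !memcmp(kw, buf, n < count ? n : count);
}

/* matches("defer+madvise", 13, "defer") returns 1, so testing "defer"
 * first would shadow the "defer+madvise" branch. */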
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 3d0aab9ee80d..e5828875f7bb 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -4403,7 +4403,9 @@ int hugetlb_reserve_pages(struct inode *inode,
 	return 0;
 out_err:
 	if (!vma || vma->vm_flags & VM_MAYSHARE)
-		region_abort(resv_map, from, to);
+		/* Don't call region_abort if region_chg failed */
+		if (chg >= 0)
+			region_abort(resv_map, from, to);
 	if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
 		kref_put(&resv_map->refs, resv_map_release);
 	return ret;
@@ -4651,6 +4653,7 @@ follow_huge_pmd(struct mm_struct *mm, unsigned long address,
 {
 	struct page *page = NULL;
 	spinlock_t *ptl;
+	pte_t pte;
 retry:
 	ptl = pmd_lockptr(mm, pmd);
 	spin_lock(ptl);
@@ -4660,12 +4663,13 @@ retry:
 	 */
 	if (!pmd_huge(*pmd))
 		goto out;
-	if (pmd_present(*pmd)) {
+	pte = huge_ptep_get((pte_t *)pmd);
+	if (pte_present(pte)) {
 		page = pmd_page(*pmd) + ((address & ~PMD_MASK) >> PAGE_SHIFT);
 		if (flags & FOLL_GET)
 			get_page(page);
 	} else {
-		if (is_hugetlb_entry_migration(huge_ptep_get((pte_t *)pmd))) {
+		if (is_hugetlb_entry_migration(pte)) {
 			spin_unlock(ptl);
 			__migration_entry_wait(mm, (pte_t *)pmd, ptl);
 			goto retry;
diff --git a/mm/internal.h b/mm/internal.h
index ccfc2a2969f4..266efaeaa370 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -481,6 +481,13 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone,
 enum ttu_flags;
 struct tlbflush_unmap_batch;
 
+
+/*
+ * only for MM internal work items which do not depend on
+ * any allocations or locks which might depend on allocations
+ */
+extern struct workqueue_struct *mm_percpu_wq;
+
 #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
 void try_to_unmap_flush(void);
 void try_to_unmap_flush_dirty(void);
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
index 1c260e6b3b3c..dd2dea8eb077 100644
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -96,11 +96,6 @@ static inline const void *kasan_shadow_to_mem(const void *shadow_addr)
 		<< KASAN_SHADOW_SCALE_SHIFT);
 }
 
-static inline bool kasan_report_enabled(void)
-{
-	return !current->kasan_depth;
-}
-
 void kasan_report(unsigned long addr, size_t size,
 		bool is_write, unsigned long ip);
 void kasan_report_double_free(struct kmem_cache *cache, void *object,
diff --git a/mm/kasan/report.c b/mm/kasan/report.c
index f479365530b6..ab42a0803f16 100644
--- a/mm/kasan/report.c
+++ b/mm/kasan/report.c
@@ -13,7 +13,9 @@
  *
  */
 
+#include <linux/bitops.h>
 #include <linux/ftrace.h>
+#include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/mm.h>
 #include <linux/printk.h>
@@ -293,6 +295,40 @@ static void kasan_report_error(struct kasan_access_info *info)
 	kasan_end_report(&flags);
 }
 
+static unsigned long kasan_flags;
+
+#define KASAN_BIT_REPORTED	0
+#define KASAN_BIT_MULTI_SHOT	1
+
+bool kasan_save_enable_multi_shot(void)
+{
+	return test_and_set_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags);
+}
+EXPORT_SYMBOL_GPL(kasan_save_enable_multi_shot);
+
+void kasan_restore_multi_shot(bool enabled)
+{
+	if (!enabled)
+		clear_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags);
+}
+EXPORT_SYMBOL_GPL(kasan_restore_multi_shot);
+
+static int __init kasan_set_multi_shot(char *str)
+{
+	set_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags);
+	return 1;
+}
+__setup("kasan_multi_shot", kasan_set_multi_shot);
+
+static inline bool kasan_report_enabled(void)
+{
+	if (current->kasan_depth)
+		return false;
+	if (test_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags))
+		return true;
+	return !test_and_set_bit(KASAN_BIT_REPORTED, &kasan_flags);
+}
+
 void kasan_report(unsigned long addr, size_t size,
 		bool is_write, unsigned long ip)
 {
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index 26c874e90b12..20036d4f9f13 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -1416,7 +1416,7 @@ static void kmemleak_scan(void)
 	/* data/bss scanning */
 	scan_large_block(_sdata, _edata);
 	scan_large_block(__bss_start, __bss_stop);
-	scan_large_block(__start_data_ro_after_init, __end_data_ro_after_init);
+	scan_large_block(__start_ro_after_init, __end_ro_after_init);
 
 #ifdef CONFIG_SMP
 	/* per-cpu sections scanning */
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 75b2745bac41..37d0b334bfe9 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1529,7 +1529,6 @@ COMPAT_SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
 COMPAT_SYSCALL_DEFINE3(set_mempolicy, int, mode, compat_ulong_t __user *, nmask,
 		       compat_ulong_t, maxnode)
 {
-	long err = 0;
 	unsigned long __user *nm = NULL;
 	unsigned long nr_bits, alloc_size;
 	DECLARE_BITMAP(bm, MAX_NUMNODES);
@@ -1538,14 +1537,13 @@ COMPAT_SYSCALL_DEFINE3(set_mempolicy, int, mode, compat_ulong_t __user *, nmask,
 	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
 
 	if (nmask) {
-		err = compat_get_bitmap(bm, nmask, nr_bits);
+		if (compat_get_bitmap(bm, nmask, nr_bits))
+			return -EFAULT;
 		nm = compat_alloc_user_space(alloc_size);
-		err |= copy_to_user(nm, bm, alloc_size);
+		if (copy_to_user(nm, bm, alloc_size))
+			return -EFAULT;
 	}
 
-	if (err)
-		return -EFAULT;
-
 	return sys_set_mempolicy(mode, nm, nr_bits+1);
 }
 
@@ -1553,7 +1551,6 @@ COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len,
 		       compat_ulong_t, mode, compat_ulong_t __user *, nmask,
 		       compat_ulong_t, maxnode, compat_ulong_t, flags)
 {
-	long err = 0;
 	unsigned long __user *nm = NULL;
 	unsigned long nr_bits, alloc_size;
 	nodemask_t bm;
@@ -1562,14 +1559,13 @@ COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len,
 	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
 
 	if (nmask) {
-		err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits);
+		if (compat_get_bitmap(nodes_addr(bm), nmask, nr_bits))
+			return -EFAULT;
 		nm = compat_alloc_user_space(alloc_size);
-		err |= copy_to_user(nm, nodes_addr(bm), alloc_size);
+		if (copy_to_user(nm, nodes_addr(bm), alloc_size))
+			return -EFAULT;
 	}
 
-	if (err)
-		return -EFAULT;
-
 	return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
 }
 
diff --git a/mm/migrate.c b/mm/migrate.c
index 9a0897a14d37..ed97c2c14fa8 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -209,8 +209,11 @@ static int remove_migration_pte(struct page *page, struct vm_area_struct *vma,
 
 	VM_BUG_ON_PAGE(PageTail(page), page);
 	while (page_vma_mapped_walk(&pvmw)) {
-		new = page - pvmw.page->index +
-			linear_page_index(vma, pvmw.address);
+		if (PageKsm(page))
+			new = page;
+		else
+			new = page - pvmw.page->index +
+				linear_page_index(vma, pvmw.address);
 
 		get_page(new);
 		pte = pte_mkold(mk_pte(new, READ_ONCE(vma->vm_page_prot)));
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 6cbde310abed..f3d603cef2c0 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2373,6 +2373,13 @@ void drain_all_pages(struct zone *zone)
 	 */
 	static cpumask_t cpus_with_pcps;
 
+	/*
+	 * Make sure nobody triggers this path before mm_percpu_wq is fully
+	 * initialized.
+	 */
+	if (WARN_ON_ONCE(!mm_percpu_wq))
+		return;
+
 	/* Workqueues cannot recurse */
 	if (current->flags & PF_WQ_WORKER)
 		return;
@@ -2422,7 +2429,7 @@ void drain_all_pages(struct zone *zone)
 	for_each_cpu(cpu, &cpus_with_pcps) {
 		struct work_struct *work = per_cpu_ptr(&pcpu_drain, cpu);
 		INIT_WORK(work, drain_local_pages_wq);
-		schedule_work_on(cpu, work);
+		queue_work_on(cpu, mm_percpu_wq, work);
 	}
 	for_each_cpu(cpu, &cpus_with_pcps)
 		flush_work(per_cpu_ptr(&pcpu_drain, cpu));
@@ -4519,13 +4526,13 @@ void show_free_areas(unsigned int filter, nodemask_t *nodemask)
 			K(node_page_state(pgdat, NR_FILE_MAPPED)),
 			K(node_page_state(pgdat, NR_FILE_DIRTY)),
 			K(node_page_state(pgdat, NR_WRITEBACK)),
+			K(node_page_state(pgdat, NR_SHMEM)),
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 			K(node_page_state(pgdat, NR_SHMEM_THPS) * HPAGE_PMD_NR),
 			K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED)
 					* HPAGE_PMD_NR),
 			K(node_page_state(pgdat, NR_ANON_THPS) * HPAGE_PMD_NR),
 #endif
-			K(node_page_state(pgdat, NR_SHMEM)),
 			K(node_page_state(pgdat, NR_WRITEBACK_TEMP)),
 			K(node_page_state(pgdat, NR_UNSTABLE_NFS)),
 			node_page_state(pgdat, NR_PAGES_SCANNED),
diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c
index c4c9def8ffea..de9c40d7304a 100644
--- a/mm/page_vma_mapped.c
+++ b/mm/page_vma_mapped.c
@@ -111,12 +111,8 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
 	if (pvmw->pmd && !pvmw->pte)
 		return not_found(pvmw);
 
-	/* Only for THP, seek to next pte entry makes sense */
-	if (pvmw->pte) {
-		if (!PageTransHuge(pvmw->page) || PageHuge(pvmw->page))
-			return not_found(pvmw);
+	if (pvmw->pte)
 		goto next_pte;
-	}
 
 	if (unlikely(PageHuge(pvmw->page))) {
 		/* when pud is not present, pte will be NULL */
@@ -165,9 +161,14 @@ restart:
 	while (1) {
 		if (check_pte(pvmw))
 			return true;
-next_pte:	do {
+next_pte:
+		/* Seek to next pte only makes sense for THP */
+		if (!PageTransHuge(pvmw->page) || PageHuge(pvmw->page))
+			return not_found(pvmw);
+		do {
 			pvmw->address += PAGE_SIZE;
-			if (pvmw->address >=
+			if (pvmw->address >= pvmw->vma->vm_end ||
+			    pvmw->address >=
 					__vma_address(pvmw->page, pvmw->vma) +
 					hpage_nr_pages(pvmw->page) * PAGE_SIZE)
 				return not_found(pvmw);
diff --git a/mm/rmap.c b/mm/rmap.c
index 49ed681ccc7b..f6838015810f 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1159,7 +1159,7 @@ void page_add_file_rmap(struct page *page, bool compound)
 		goto out;
 	}
 	__mod_node_page_state(page_pgdat(page), NR_FILE_MAPPED, nr);
-	mem_cgroup_inc_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED);
+	mem_cgroup_update_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED, nr);
 out:
 	unlock_page_memcg(page);
 }
@@ -1199,7 +1199,7 @@ static void page_remove_file_rmap(struct page *page, bool compound)
 	 * pte lock(a spinlock) is held, which implies preemption disabled.
 	 */
 	__mod_node_page_state(page_pgdat(page), NR_FILE_MAPPED, -nr);
-	mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED);
+	mem_cgroup_update_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED, -nr);
 
 	if (unlikely(PageMlocked(page)))
 		clear_page_mlock(page);
diff --git a/mm/swap.c b/mm/swap.c
index c4910f14f957..5dabf444d724 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -670,30 +670,19 @@ static void lru_add_drain_per_cpu(struct work_struct *dummy)
 
 static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);
 
-/*
- * lru_add_drain_wq is used to do lru_add_drain_all() from a WQ_MEM_RECLAIM
- * workqueue, aiding in getting memory freed.
- */
-static struct workqueue_struct *lru_add_drain_wq;
-
-static int __init lru_init(void)
-{
-	lru_add_drain_wq = alloc_workqueue("lru-add-drain", WQ_MEM_RECLAIM, 0);
-
-	if (WARN(!lru_add_drain_wq,
-		"Failed to create workqueue lru_add_drain_wq"))
-		return -ENOMEM;
-
-	return 0;
-}
-early_initcall(lru_init);
-
 void lru_add_drain_all(void)
 {
 	static DEFINE_MUTEX(lock);
 	static struct cpumask has_work;
 	int cpu;
 
+	/*
+	 * Make sure nobody triggers this path before mm_percpu_wq is fully
+	 * initialized.
+	 */
+	if (WARN_ON(!mm_percpu_wq))
+		return;
+
 	mutex_lock(&lock);
 	get_online_cpus();
 	cpumask_clear(&has_work);
@@ -707,7 +696,7 @@ void lru_add_drain_all(void)
 		    pagevec_count(&per_cpu(lru_deactivate_pvecs, cpu)) ||
 		    need_activate_page_drain(cpu)) {
 			INIT_WORK(work, lru_add_drain_per_cpu);
-			queue_work_on(cpu, lru_add_drain_wq, work);
+			queue_work_on(cpu, mm_percpu_wq, work);
 			cpumask_set_cpu(cpu, &has_work);
 		}
 	}
diff --git a/mm/swap_cgroup.c b/mm/swap_cgroup.c
index 310ac0b8f974..ac6318a064d3 100644
--- a/mm/swap_cgroup.c
+++ b/mm/swap_cgroup.c
@@ -201,6 +201,8 @@ void swap_cgroup_swapoff(int type)
 			struct page *page = map[i];
 			if (page)
 				__free_page(page);
+			if (!(i % SWAP_CLUSTER_MAX))
+				cond_resched();
 		}
 		vfree(map);
 	}
diff --git a/mm/vmstat.c b/mm/vmstat.c
index b1947f0cbee2..809025ed97ea 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -1552,7 +1552,6 @@ static const struct file_operations proc_vmstat_file_operations = {
 #endif /* CONFIG_PROC_FS */
 
 #ifdef CONFIG_SMP
-static struct workqueue_struct *vmstat_wq;
 static DEFINE_PER_CPU(struct delayed_work, vmstat_work);
 int sysctl_stat_interval __read_mostly = HZ;
 
@@ -1623,7 +1622,7 @@ static void vmstat_update(struct work_struct *w)
 		 * to occur in the future. Keep on running the
 		 * update worker thread.
 		 */
-		queue_delayed_work_on(smp_processor_id(), vmstat_wq,
+		queue_delayed_work_on(smp_processor_id(), mm_percpu_wq,
 				this_cpu_ptr(&vmstat_work),
 				round_jiffies_relative(sysctl_stat_interval));
 	}
@@ -1702,7 +1701,7 @@ static void vmstat_shepherd(struct work_struct *w)
 		struct delayed_work *dw = &per_cpu(vmstat_work, cpu);
 
 		if (!delayed_work_pending(dw) && need_update(cpu))
-			queue_delayed_work_on(cpu, vmstat_wq, dw, 0);
+			queue_delayed_work_on(cpu, mm_percpu_wq, dw, 0);
 	}
 	put_online_cpus();
 
@@ -1718,7 +1717,6 @@ static void __init start_shepherd_timer(void)
 		INIT_DEFERRABLE_WORK(per_cpu_ptr(&vmstat_work, cpu),
 			vmstat_update);
 
-	vmstat_wq = alloc_workqueue("vmstat", WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
 	schedule_delayed_work(&shepherd,
 		round_jiffies_relative(sysctl_stat_interval));
 }
@@ -1764,11 +1762,16 @@ static int vmstat_cpu_dead(unsigned int cpu)
 
 #endif
 
-static int __init setup_vmstat(void)
+struct workqueue_struct *mm_percpu_wq;
+
+void __init init_mm_internals(void)
 {
-#ifdef CONFIG_SMP
-	int ret;
+	int ret __maybe_unused;
+
+	mm_percpu_wq = alloc_workqueue("mm_percpu_wq",
+				       WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
 
+#ifdef CONFIG_SMP
 	ret = cpuhp_setup_state_nocalls(CPUHP_MM_VMSTAT_DEAD, "mm/vmstat:dead",
 					NULL, vmstat_cpu_dead);
 	if (ret < 0)
@@ -1792,9 +1795,7 @@ static int __init setup_vmstat(void)
 	proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
 	proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
 #endif
-	return 0;
 }
-module_init(setup_vmstat)
 
 #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION)
 
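The vmstat, lru-drain, and page-allocator changes in this series all converge on one workqueue: mm_percpu_wq is allocated once in init_mm_internals(), and every user guards against being called before that point. The guarded-use shape, sketched with an assumed stand-in name:

#include <linux/workqueue.h>

static struct workqueue_struct *shared_wq;	/* stands in for mm_percpu_wq */

static void queue_guarded(struct work_struct *work)
{
	/* refuse loudly if init has not run yet, instead of crashing */
	if (WARN_ON_ONCE(!shared_wq))
		return;
	queue_work(shared_wq, work);
}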
diff --git a/mm/workingset.c b/mm/workingset.c
index ac839fca0e76..eda05c71fa49 100644
--- a/mm/workingset.c
+++ b/mm/workingset.c
@@ -532,7 +532,7 @@ static int __init workingset_init(void)
 	pr_info("workingset: timestamp_bits=%d max_order=%d bucket_order=%u\n",
 	       timestamp_bits, max_order, bucket_order);
 
-	ret = list_lru_init_key(&shadow_nodes, &shadow_nodes_key);
+	ret = __list_lru_init(&shadow_nodes, true, &shadow_nodes_key);
 	if (ret)
 		goto err;
 	ret = register_shrinker(&workingset_shadow_shrinker);
diff --git a/mm/z3fold.c b/mm/z3fold.c
index f9492bccfd79..54f63c4a809a 100644
--- a/mm/z3fold.c
+++ b/mm/z3fold.c
@@ -185,6 +185,12 @@ static inline void z3fold_page_lock(struct z3fold_header *zhdr)
 	spin_lock(&zhdr->page_lock);
 }
 
+/* Try to lock a z3fold page */
+static inline int z3fold_page_trylock(struct z3fold_header *zhdr)
+{
+	return spin_trylock(&zhdr->page_lock);
+}
+
 /* Unlock a z3fold page */
 static inline void z3fold_page_unlock(struct z3fold_header *zhdr)
 {
@@ -385,7 +391,7 @@ static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
 			spin_lock(&pool->lock);
 			zhdr = list_first_entry_or_null(&pool->unbuddied[i],
 						struct z3fold_header, buddy);
-			if (!zhdr) {
+			if (!zhdr || !z3fold_page_trylock(zhdr)) {
 				spin_unlock(&pool->lock);
 				continue;
 			}
@@ -394,7 +400,6 @@ static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
 			spin_unlock(&pool->lock);
 
 			page = virt_to_page(zhdr);
-			z3fold_page_lock(zhdr);
 			if (zhdr->first_chunks == 0) {
 				if (zhdr->middle_chunks != 0 &&
 				    chunks >= zhdr->start_middle)
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index b7ee9c34dbd6..d41edd28298b 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -276,7 +276,7 @@ struct zs_pool {
 struct zspage {
 	struct {
 		unsigned int fullness:FULLNESS_BITS;
-		unsigned int class:CLASS_BITS;
+		unsigned int class:CLASS_BITS + 1;
 		unsigned int isolated:ISOLATED_BITS;
 		unsigned int magic:MAGIC_VAL_BITS;
 	};
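Widening the bitfield to CLASS_BITS + 1 avoids silent truncation: an unsigned bitfield assignment is reduced modulo 2^width, so a class index needing one more bit would quietly lose its top bit. A tiny stand-alone demonstration of the failure mode:

#include <stdio.h>

struct narrow {
	unsigned int class:8;	/* one bit too few if indices reach 256 */
};

int main(void)
{
	unsigned int v = 256;
	struct narrow n;

	n.class = v;			/* reduced modulo 256 */
	printf("%u\n", n.class);	/* prints 0, not 256 */
	return 0;
}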
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index ea71513fca21..90f49a194249 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -119,6 +119,15 @@ static int br_dev_init(struct net_device *dev)
 	return err;
 }
 
+static void br_dev_uninit(struct net_device *dev)
+{
+	struct net_bridge *br = netdev_priv(dev);
+
+	br_multicast_uninit_stats(br);
+	br_vlan_flush(br);
+	free_percpu(br->stats);
+}
+
 static int br_dev_open(struct net_device *dev)
 {
 	struct net_bridge *br = netdev_priv(dev);
@@ -332,6 +341,7 @@ static const struct net_device_ops br_netdev_ops = {
 	.ndo_open		 = br_dev_open,
 	.ndo_stop		 = br_dev_stop,
 	.ndo_init		 = br_dev_init,
+	.ndo_uninit		 = br_dev_uninit,
 	.ndo_start_xmit		 = br_dev_xmit,
 	.ndo_get_stats64	 = br_get_stats64,
 	.ndo_set_mac_address	 = br_set_mac_address,
@@ -356,14 +366,6 @@ static const struct net_device_ops br_netdev_ops = {
 	.ndo_features_check	 = passthru_features_check,
 };
 
-static void br_dev_free(struct net_device *dev)
-{
-	struct net_bridge *br = netdev_priv(dev);
-
-	free_percpu(br->stats);
-	free_netdev(dev);
-}
-
 static struct device_type br_type = {
 	.name = "bridge",
 };
@@ -376,7 +378,7 @@ void br_dev_setup(struct net_device *dev)
 	ether_setup(dev);
 
 	dev->netdev_ops = &br_netdev_ops;
-	dev->destructor = br_dev_free;
+	dev->destructor = free_netdev;
 	dev->ethtool_ops = &br_ethtool_ops;
 	SET_NETDEV_DEVTYPE(dev, &br_type);
 	dev->priv_flags = IFF_EBRIDGE | IFF_NO_QUEUE;
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index 8ac1770aa222..56a2a72e7738 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -311,7 +311,6 @@ void br_dev_delete(struct net_device *dev, struct list_head *head)
 
 	br_fdb_delete_by_port(br, NULL, 0, 1);
 
-	br_vlan_flush(br);
 	br_multicast_dev_del(br);
 	cancel_delayed_work_sync(&br->gc_work);
 
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index b760f2620abf..faa7261a992f 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -2031,8 +2031,6 @@ void br_multicast_dev_del(struct net_bridge *br)
 
 out:
 	spin_unlock_bh(&br->multicast_lock);
-
-	free_percpu(br->mcast_stats);
 }
 
 int br_multicast_set_router(struct net_bridge *br, unsigned long val)
@@ -2531,6 +2529,11 @@ int br_multicast_init_stats(struct net_bridge *br)
 	return 0;
 }
 
+void br_multicast_uninit_stats(struct net_bridge *br)
+{
+	free_percpu(br->mcast_stats);
+}
+
 static void mcast_stats_add_dir(u64 *dst, u64 *src)
 {
 	dst[BR_MCAST_DIR_RX] += src[BR_MCAST_DIR_RX];
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index a8f6acd23e30..225ef7d53701 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -1165,11 +1165,14 @@ static int br_dev_newlink(struct net *src_net, struct net_device *dev,
 		spin_unlock_bh(&br->lock);
 	}
 
-	err = br_changelink(dev, tb, data);
+	err = register_netdevice(dev);
 	if (err)
 		return err;
 
-	return register_netdevice(dev);
+	err = br_changelink(dev, tb, data);
+	if (err)
+		unregister_netdevice(dev);
+	return err;
 }
 
 static size_t br_get_size(const struct net_device *brdev)
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 61368186edea..0d177280aa84 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -620,6 +620,7 @@ void br_rtr_notify(struct net_device *dev, struct net_bridge_port *port,
 void br_multicast_count(struct net_bridge *br, const struct net_bridge_port *p,
 			const struct sk_buff *skb, u8 type, u8 dir);
 int br_multicast_init_stats(struct net_bridge *br);
+void br_multicast_uninit_stats(struct net_bridge *br);
 void br_multicast_get_stats(const struct net_bridge *br,
 			    const struct net_bridge_port *p,
 			    struct br_mcast_stats *dest);
@@ -760,6 +761,10 @@ static inline int br_multicast_init_stats(struct net_bridge *br)
 	return 0;
 }
 
+static inline void br_multicast_uninit_stats(struct net_bridge *br)
+{
+}
+
 static inline int br_multicast_igmp_type(const struct sk_buff *skb)
 {
 	return 0;
diff --git a/net/core/datagram.c b/net/core/datagram.c
index ea633342ab0d..f4947e737f34 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -398,7 +398,7 @@ int skb_copy_datagram_iter(const struct sk_buff *skb, int offset,
 			   struct iov_iter *to, int len)
 {
 	int start = skb_headlen(skb);
-	int i, copy = start - offset;
+	int i, copy = start - offset, start_off = offset, n;
 	struct sk_buff *frag_iter;
 
 	trace_skb_copy_datagram_iovec(skb, len);
@@ -407,11 +407,12 @@ int skb_copy_datagram_iter(const struct sk_buff *skb, int offset,
 	if (copy > 0) {
 		if (copy > len)
 			copy = len;
-		if (copy_to_iter(skb->data + offset, copy, to) != copy)
+		n = copy_to_iter(skb->data + offset, copy, to);
+		offset += n;
+		if (n != copy)
 			goto short_copy;
 		if ((len -= copy) == 0)
 			return 0;
-		offset += copy;
 	}
 
 	/* Copy paged appendix. Hmm... why does this look so complicated? */
@@ -425,13 +426,14 @@ int skb_copy_datagram_iter(const struct sk_buff *skb, int offset,
 		if ((copy = end - offset) > 0) {
 			if (copy > len)
 				copy = len;
-			if (copy_page_to_iter(skb_frag_page(frag),
+			n = copy_page_to_iter(skb_frag_page(frag),
 					      frag->page_offset + offset -
-					      start, copy, to) != copy)
+					      start, copy, to);
+			offset += n;
+			if (n != copy)
 				goto short_copy;
 			if (!(len -= copy))
 				return 0;
-			offset += copy;
 		}
 		start = end;
 	}
@@ -463,6 +465,7 @@ int skb_copy_datagram_iter(const struct sk_buff *skb, int offset,
 	 */
 
 fault:
+	iov_iter_revert(to, offset - start_off);
 	return -EFAULT;
 
 short_copy:
@@ -613,7 +616,7 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
 				      __wsum *csump)
 {
 	int start = skb_headlen(skb);
-	int i, copy = start - offset;
+	int i, copy = start - offset, start_off = offset;
 	struct sk_buff *frag_iter;
 	int pos = 0;
 	int n;
@@ -623,11 +626,11 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
 		if (copy > len)
 			copy = len;
 		n = csum_and_copy_to_iter(skb->data + offset, copy, csump, to);
+		offset += n;
 		if (n != copy)
 			goto fault;
 		if ((len -= copy) == 0)
 			return 0;
-		offset += copy;
 		pos = copy;
 	}
 
@@ -649,12 +652,12 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
 						  offset - start, copy,
 						  &csum2, to);
 			kunmap(page);
+			offset += n;
 			if (n != copy)
 				goto fault;
 			*csump = csum_block_add(*csump, csum2, pos);
 			if (!(len -= copy))
 				return 0;
-			offset += copy;
 			pos += copy;
 		}
 		start = end;
@@ -687,6 +690,7 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
 	return 0;
 
 fault:
+	iov_iter_revert(to, offset - start_off);
 	return -EFAULT;
 }
 
@@ -771,6 +775,7 @@ int skb_copy_and_csum_datagram_msg(struct sk_buff *skb,
 	}
 	return 0;
 csum_error:
+	iov_iter_revert(&msg->msg_iter, chunk);
 	return -EINVAL;
 fault:
 	return -EFAULT;
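Each copy routine now remembers start_off and advances offset by the bytes actually copied, so the fault path can hand iov_iter_revert() the exact amount consumed and leave the caller a clean iterator. Condensed to a single step (hedged sketch, hypothetical function):

#include <linux/uio.h>

static int copy_with_unwind(struct iov_iter *to, void *data, size_t len)
{
	size_t start_off = 0, offset = 0;
	size_t n = copy_to_iter(data, len, to);

	offset += n;			/* count what really went in */
	if (n != len) {
		/* undo the partial copy before reporting the fault */
		iov_iter_revert(to, offset - start_off);
		return -EFAULT;
	}
	return 0;
}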
diff --git a/net/core/dev.c b/net/core/dev.c
index 7869ae3837ca..533a6d6f6092 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -6757,7 +6757,6 @@ int dev_change_xdp_fd(struct net_device *dev, int fd, u32 flags)
 
 	return err;
 }
-EXPORT_SYMBOL(dev_change_xdp_fd);
 
 /**
  *	dev_new_index - allocate an ifindex
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index c35aae13c8d2..d98d4998213d 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -390,7 +390,7 @@ mpls:
 			unsigned char ar_tip[4];
 		} *arp_eth, _arp_eth;
 		const struct arphdr *arp;
-		struct arphdr *_arp;
+		struct arphdr _arp;
 
 		arp = __skb_header_pointer(skb, nhoff, sizeof(_arp), data,
 					   hlen, &_arp);
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index e7c12caa20c8..4526cbd7e28a 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -860,7 +860,8 @@ static void neigh_probe(struct neighbour *neigh)
 	if (skb)
 		skb = skb_clone(skb, GFP_ATOMIC);
 	write_unlock(&neigh->lock);
-	neigh->ops->solicit(neigh, skb);
+	if (neigh->ops->solicit)
+		neigh->ops->solicit(neigh, skb);
 	atomic_inc(&neigh->probes);
 	kfree_skb(skb);
 }
diff --git a/net/core/secure_seq.c b/net/core/secure_seq.c
index 758f140b6bed..d28da7d363f1 100644
--- a/net/core/secure_seq.c
+++ b/net/core/secure_seq.c
@@ -20,9 +20,11 @@
 #include <net/tcp.h>
 
 static siphash_key_t net_secret __read_mostly;
+static siphash_key_t ts_secret __read_mostly;
 
 static __always_inline void net_secret_init(void)
 {
+	net_get_random_once(&ts_secret, sizeof(ts_secret));
 	net_get_random_once(&net_secret, sizeof(net_secret));
 }
 #endif
@@ -45,6 +47,23 @@ static u32 seq_scale(u32 seq)
 #endif
 
 #if IS_ENABLED(CONFIG_IPV6)
+static u32 secure_tcpv6_ts_off(const __be32 *saddr, const __be32 *daddr)
+{
+	const struct {
+		struct in6_addr saddr;
+		struct in6_addr daddr;
+	} __aligned(SIPHASH_ALIGNMENT) combined = {
+		.saddr = *(struct in6_addr *)saddr,
+		.daddr = *(struct in6_addr *)daddr,
+	};
+
+	if (sysctl_tcp_timestamps != 1)
+		return 0;
+
+	return siphash(&combined, offsetofend(typeof(combined), daddr),
+		       &ts_secret);
+}
+
 u32 secure_tcpv6_sequence_number(const __be32 *saddr, const __be32 *daddr,
 				 __be16 sport, __be16 dport, u32 *tsoff)
 {
@@ -63,7 +82,7 @@ u32 secure_tcpv6_sequence_number(const __be32 *saddr, const __be32 *daddr,
 	net_secret_init();
 	hash = siphash(&combined, offsetofend(typeof(combined), dport),
 		       &net_secret);
-	*tsoff = sysctl_tcp_timestamps == 1 ? (hash >> 32) : 0;
+	*tsoff = secure_tcpv6_ts_off(saddr, daddr);
 	return seq_scale(hash);
 }
 EXPORT_SYMBOL(secure_tcpv6_sequence_number);
@@ -88,6 +107,14 @@ EXPORT_SYMBOL(secure_ipv6_port_ephemeral);
 #endif
 
 #ifdef CONFIG_INET
+static u32 secure_tcp_ts_off(__be32 saddr, __be32 daddr)
+{
+	if (sysctl_tcp_timestamps != 1)
+		return 0;
+
+	return siphash_2u32((__force u32)saddr, (__force u32)daddr,
+			    &ts_secret);
+}
 
 /* secure_tcp_sequence_number(a, b, 0, d) == secure_ipv4_port_ephemeral(a, b, d),
  * but fortunately, `sport' cannot be 0 in any circumstances. If this changes,
@@ -103,7 +130,7 @@ u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
 	hash = siphash_3u32((__force u32)saddr, (__force u32)daddr,
 			    (__force u32)sport << 16 | (__force u32)dport,
 			    &net_secret);
-	*tsoff = sysctl_tcp_timestamps == 1 ? (hash >> 32) : 0;
+	*tsoff = secure_tcp_ts_off(saddr, daddr);
 	return seq_scale(hash);
 }
 
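
The secure_seq changes split the TCP timestamp offset out of the ISN hash: the offset is now a SipHash of only the address pair under a dedicated ts_secret key, so it stays stable for a given peer across connections yet remains unpredictable to outsiders, and it collapses to 0 unless sysctl_tcp_timestamps == 1. A rough userspace sketch of the idea, using a stand-in 64-bit mixer where the kernel uses siphash():

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for the kernel's keyed siphash(); any PRF keyed with a
     * boot-time random secret gives the same property: a stable,
     * per-(saddr, daddr) offset that observers cannot predict. */
    static uint32_t keyed_hash(uint32_t saddr, uint32_t daddr, uint64_t key)
    {
        uint64_t x = (((uint64_t)saddr << 32) | daddr) ^ key;

        x ^= x >> 33; x *= 0xff51afd7ed558ccdULL;
        x ^= x >> 33; x *= 0xc4ceb9fe1a85ec53ULL;
        return (uint32_t)(x ^ (x >> 33));
    }

    static int tcp_timestamps = 1;   /* models sysctl_tcp_timestamps */
    static uint64_t ts_secret = 0x243f6a8885a308d3ULL; /* random at boot */

    static uint32_t secure_ts_off(uint32_t saddr, uint32_t daddr)
    {
        if (tcp_timestamps != 1)
            return 0;   /* offsets only in mode 1, as in the patch */
        return keyed_hash(saddr, daddr, ts_secret);
    }

    int main(void)
    {
        /* Same peer pair -> same offset; different pair -> unrelated. */
        printf("%u\n", secure_ts_off(0x0a000001, 0x0a000002));
        printf("%u\n", secure_ts_off(0x0a000001, 0x0a000002));
        printf("%u\n", secure_ts_off(0x0a000001, 0x0a000003));
        return 0;
    }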
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index 4ead336e14ea..7f9cc400eca0 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -408,14 +408,16 @@ static struct ctl_table net_core_table[] = {
 		.data		= &sysctl_net_busy_poll,
 		.maxlen		= sizeof(unsigned int),
 		.mode		= 0644,
-		.proc_handler	= proc_dointvec
+		.proc_handler	= proc_dointvec_minmax,
+		.extra1		= &zero,
 	},
 	{
 		.procname	= "busy_read",
 		.data		= &sysctl_net_busy_read,
 		.maxlen		= sizeof(unsigned int),
 		.mode		= 0644,
-		.proc_handler	= proc_dointvec
+		.proc_handler	= proc_dointvec_minmax,
+		.extra1		= &zero,
 	},
 #endif
 #ifdef CONFIG_NET_SCHED
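
Switching busy_poll/busy_read from proc_dointvec to proc_dointvec_minmax with .extra1 = &zero means writes below zero are rejected rather than stored. A small sketch of the clamping behaviour; store_minmax is a hypothetical stand-in for the proc handler, not a kernel function:

    #include <stdio.h>

    /* Models what proc_dointvec_minmax does for these entries: reject
     * any value below *min (here zero) instead of storing it.  A plain
     * proc_dointvec would accept a negative busy-poll budget. */
    static int zero = 0;

    static int store_minmax(int *data, int value, const int *min)
    {
        if (min && value < *min)
            return -1;      /* the kernel returns -EINVAL here */
        *data = value;
        return 0;
    }

    int main(void)
    {
        int sysctl_net_busy_poll = 0;

        if (store_minmax(&sysctl_net_busy_poll, -50, &zero))
            printf("rejected -50, busy_poll still %d\n", sysctl_net_busy_poll);
        store_minmax(&sysctl_net_busy_poll, 50, &zero);
        printf("accepted 50, busy_poll now %d\n", sysctl_net_busy_poll);
        return 0;
    }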
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index fd9f34bbd740..dfb2ab2dd3c8 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -306,7 +306,7 @@ static void __init ic_close_devs(void)
 	while ((d = next)) {
 		next = d->next;
 		dev = d->dev;
-		if ((!ic_dev || dev != ic_dev->dev) && !netdev_uses_dsa(dev)) {
+		if (d != ic_dev && !netdev_uses_dsa(dev)) {
 			pr_debug("IP-Config: Downing %s\n", dev->name);
 			dev_change_flags(dev, d->flags);
 		}
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index 52f26459efc3..9b8841316e7b 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -461,7 +461,7 @@ static void clusterip_tg_destroy(const struct xt_tgdtor_param *par)
 
 	clusterip_config_put(cipinfo->config);
 
-	nf_ct_netns_get(par->net, par->family);
+	nf_ct_netns_put(par->net, par->family);
 }
 
 #ifdef CONFIG_COMPAT
diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c
index c9b52c361da2..53e49f5011d3 100644
--- a/net/ipv4/netfilter/nf_nat_snmp_basic.c
+++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c
@@ -1260,16 +1260,6 @@ static const struct nf_conntrack_expect_policy snmp_exp_policy = {
 	.timeout	= 180,
 };
 
-static struct nf_conntrack_helper snmp_helper __read_mostly = {
-	.me			= THIS_MODULE,
-	.help			= help,
-	.expect_policy		= &snmp_exp_policy,
-	.name			= "snmp",
-	.tuple.src.l3num	= AF_INET,
-	.tuple.src.u.udp.port	= cpu_to_be16(SNMP_PORT),
-	.tuple.dst.protonum	= IPPROTO_UDP,
-};
-
 static struct nf_conntrack_helper snmp_trap_helper __read_mostly = {
 	.me			= THIS_MODULE,
 	.help			= help,
@@ -1288,22 +1278,16 @@ static struct nf_conntrack_helper snmp_trap_helper __read_mostly = {
 
 static int __init nf_nat_snmp_basic_init(void)
 {
-	int ret = 0;
-
 	BUG_ON(nf_nat_snmp_hook != NULL);
 	RCU_INIT_POINTER(nf_nat_snmp_hook, help);
 
-	ret = nf_conntrack_helper_register(&snmp_trap_helper);
-	if (ret < 0) {
-		nf_conntrack_helper_unregister(&snmp_helper);
-		return ret;
-	}
-	return ret;
+	return nf_conntrack_helper_register(&snmp_trap_helper);
 }
 
 static void __exit nf_nat_snmp_basic_fini(void)
 {
 	RCU_INIT_POINTER(nf_nat_snmp_hook, NULL);
+	synchronize_rcu();
 	nf_conntrack_helper_unregister(&snmp_trap_helper);
 }
 
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index 2af6244b83e2..ccfbce13a633 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -156,17 +156,18 @@ int ping_hash(struct sock *sk)
 void ping_unhash(struct sock *sk)
 {
 	struct inet_sock *isk = inet_sk(sk);
+
 	pr_debug("ping_unhash(isk=%p,isk->num=%u)\n", isk, isk->inet_num);
+	write_lock_bh(&ping_table.lock);
 	if (sk_hashed(sk)) {
-		write_lock_bh(&ping_table.lock);
 		hlist_nulls_del(&sk->sk_nulls_node);
 		sk_nulls_node_init(&sk->sk_nulls_node);
 		sock_put(sk);
 		isk->inet_num = 0;
 		isk->inet_sport = 0;
 		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
-		write_unlock_bh(&ping_table.lock);
 	}
+	write_unlock_bh(&ping_table.lock);
 }
 EXPORT_SYMBOL_GPL(ping_unhash);
 
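
The ping_unhash() hunk moves the write lock outside the sk_hashed() test, so the check and the unlink form one critical section; previously a concurrent re-hash could slip in between the test and the lock acquisition. A standalone sketch of the lock-then-test ordering (compile with -pthread; the types here are illustrative):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Models the fix: the hashed test and the unlink must be one atomic
     * unit under the table lock. */
    static pthread_rwlock_t table_lock = PTHREAD_RWLOCK_INITIALIZER;

    struct sock {
        bool hashed;
        int inet_num;
    };

    static void sock_unhash(struct sock *sk)
    {
        pthread_rwlock_wrlock(&table_lock);     /* lock FIRST ... */
        if (sk->hashed) {                       /* ... then test */
            sk->hashed = false;
            sk->inet_num = 0;
        }
        pthread_rwlock_unlock(&table_lock);
    }

    int main(void)
    {
        struct sock sk = { .hashed = true, .inet_num = 1 };

        sock_unhash(&sk);
        printf("hashed=%d num=%d\n", (int)sk.hashed, sk.inet_num);
        return 0;
    }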
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 8471dd116771..acd69cfe2951 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -2620,7 +2620,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh)
 	skb_reset_network_header(skb);
 
 	/* Bugfix: need to give ip_route_input enough of an IP header to not gag. */
-	ip_hdr(skb)->protocol = IPPROTO_ICMP;
+	ip_hdr(skb)->protocol = IPPROTO_UDP;
 	skb_reserve(skb, MAX_HEADER + sizeof(struct iphdr));
 
 	src = tb[RTA_SRC] ? nla_get_in_addr(tb[RTA_SRC]) : 0;
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 1e319a525d51..40ba4249a586 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2322,6 +2322,7 @@ int tcp_disconnect(struct sock *sk, int flags)
 	tcp_init_send_head(sk);
 	memset(&tp->rx_opt, 0, sizeof(tp->rx_opt));
 	__sk_dst_reset(sk);
+	tcp_saved_syn_free(tp);
 
 	/* Clean up fastopen related fields */
 	tcp_free_fastopen_req(tp);
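
tcp_disconnect() now also releases the saved SYN headers, since a disconnected socket can be reused and would otherwise leak one allocation per reuse. A toy model of the same free-and-clear-on-reset rule; struct conn and its helpers are invented for illustration:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Per-connection state that is allocated on accept and must be
     * dropped on disconnect, or every reuse of the object leaks it. */
    struct conn {
        char *saved_syn;
    };

    static void conn_save_syn(struct conn *c, const char *hdr)
    {
        c->saved_syn = strdup(hdr);
    }

    static void conn_disconnect(struct conn *c)
    {
        /* the one-line fix: free and clear saved state on disconnect */
        free(c->saved_syn);
        c->saved_syn = NULL;
    }

    int main(void)
    {
        struct conn c = { 0 };

        for (int i = 0; i < 3; i++) {   /* reuse the same conn object */
            conn_save_syn(&c, "SYN: example headers");
            conn_disconnect(&c);
        }
        printf("no leak: saved_syn=%p\n", (void *)c.saved_syn);
        return 0;
    }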
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index c43119726a62..659d1baefb2b 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -126,7 +126,8 @@ int sysctl_tcp_invalid_ratelimit __read_mostly = HZ/2;
 #define REXMIT_LOST	1 /* retransmit packets marked lost */
 #define REXMIT_NEW	2 /* FRTO-style transmit of unsent/new packets */
 
-static void tcp_gro_dev_warn(struct sock *sk, const struct sk_buff *skb)
+static void tcp_gro_dev_warn(struct sock *sk, const struct sk_buff *skb,
+			     unsigned int len)
 {
 	static bool __once __read_mostly;
 
@@ -137,8 +138,9 @@ static void tcp_gro_dev_warn(struct sock *sk, const struct sk_buff *skb)
 
 		rcu_read_lock();
 		dev = dev_get_by_index_rcu(sock_net(sk), skb->skb_iif);
-		pr_warn("%s: Driver has suspect GRO implementation, TCP performance may be compromised.\n",
-			dev ? dev->name : "Unknown driver");
+		if (!dev || len >= dev->mtu)
+			pr_warn("%s: Driver has suspect GRO implementation, TCP performance may be compromised.\n",
+				dev ? dev->name : "Unknown driver");
 		rcu_read_unlock();
 	}
 }
@@ -161,8 +163,10 @@ static void tcp_measure_rcv_mss(struct sock *sk, const struct sk_buff *skb)
 	if (len >= icsk->icsk_ack.rcv_mss) {
 		icsk->icsk_ack.rcv_mss = min_t(unsigned int, len,
 					       tcp_sk(sk)->advmss);
-		if (unlikely(icsk->icsk_ack.rcv_mss != len))
-			tcp_gro_dev_warn(sk, skb);
+		/* Account for possibly-removed options */
+		if (unlikely(len > icsk->icsk_ack.rcv_mss +
+				   MAX_TCP_OPTION_SPACE))
+			tcp_gro_dev_warn(sk, skb, len);
 	} else {
 		/* Otherwise, we make more careful check taking into account,
 		 * that SACKs block is variable.
@@ -874,22 +878,11 @@ static void tcp_update_reordering(struct sock *sk, const int metric,
 				  const int ts)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
-	if (metric > tp->reordering) {
-		int mib_idx;
+	int mib_idx;
 
+	if (metric > tp->reordering) {
 		tp->reordering = min(sysctl_tcp_max_reordering, metric);
 
-		/* This exciting event is worth to be remembered. 8) */
-		if (ts)
-			mib_idx = LINUX_MIB_TCPTSREORDER;
-		else if (tcp_is_reno(tp))
-			mib_idx = LINUX_MIB_TCPRENOREORDER;
-		else if (tcp_is_fack(tp))
-			mib_idx = LINUX_MIB_TCPFACKREORDER;
-		else
-			mib_idx = LINUX_MIB_TCPSACKREORDER;
-
-		NET_INC_STATS(sock_net(sk), mib_idx);
 #if FASTRETRANS_DEBUG > 1
 		pr_debug("Disorder%d %d %u f%u s%u rr%d\n",
 			 tp->rx_opt.sack_ok, inet_csk(sk)->icsk_ca_state,
@@ -902,6 +895,18 @@ static void tcp_update_reordering(struct sock *sk, const int metric,
 	}
 
 	tp->rack.reord = 1;
+
+	/* This exciting event is worth to be remembered. 8) */
+	if (ts)
+		mib_idx = LINUX_MIB_TCPTSREORDER;
+	else if (tcp_is_reno(tp))
+		mib_idx = LINUX_MIB_TCPRENOREORDER;
+	else if (tcp_is_fack(tp))
+		mib_idx = LINUX_MIB_TCPFACKREORDER;
+	else
+		mib_idx = LINUX_MIB_TCPSACKREORDER;
+
+	NET_INC_STATS(sock_net(sk), mib_idx);
 }
 
 /* This must be called before lost_out is incremented */
@@ -1930,6 +1935,7 @@ void tcp_enter_loss(struct sock *sk)
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct net *net = sock_net(sk);
 	struct sk_buff *skb;
+	bool new_recovery = icsk->icsk_ca_state < TCP_CA_Recovery;
 	bool is_reneg;			/* is receiver reneging on SACKs? */
 	bool mark_lost;
 
@@ -1989,15 +1995,18 @@ void tcp_enter_loss(struct sock *sk)
 	tp->high_seq = tp->snd_nxt;
 	tcp_ecn_queue_cwr(tp);
 
-	/* F-RTO RFC5682 sec 3.1 step 1 mandates to disable F-RTO
-	 * if a previous recovery is underway, otherwise it may incorrectly
-	 * call a timeout spurious if some previously retransmitted packets
-	 * are s/acked (sec 3.2). We do not apply that retriction since
-	 * retransmitted skbs are permanently tagged with TCPCB_EVER_RETRANS
-	 * so FLAG_ORIG_SACK_ACKED is always correct. But we do disable F-RTO
-	 * on PTMU discovery to avoid sending new data.
+	/* F-RTO RFC5682 sec 3.1 step 1: retransmit SND.UNA if no previous
+	 * loss recovery is underway except recurring timeout(s) on
+	 * the same SND.UNA (sec 3.2). Disable F-RTO on path MTU probing
+	 *
+	 * In theory F-RTO can be used repeatedly during loss recovery.
+	 * In practice this interacts badly with broken middle-boxes that
+	 * falsely raise the receive window, which results in repeated
+	 * timeouts and stop-and-go behavior.
 	 */
-	tp->frto = sysctl_tcp_frto && !inet_csk(sk)->icsk_mtup.probe_size;
+	tp->frto = sysctl_tcp_frto &&
+		   (new_recovery || icsk->icsk_retransmits) &&
+		   !inet_csk(sk)->icsk_mtup.probe_size;
 }
 
 /* If ACK arrived pointing to a remembered SACK, it means that our
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 22548b5f05cb..c3c082ed3879 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2999,6 +2999,8 @@ void tcp_send_active_reset(struct sock *sk, gfp_t priority)
 {
 	struct sk_buff *skb;
 
+	TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTRSTS);
+
 	/* NOTE: No TCP options attached and we never retransmit this. */
 	skb = alloc_skb(MAX_TCP_HEADER, priority);
 	if (!skb) {
@@ -3014,8 +3016,6 @@ void tcp_send_active_reset(struct sock *sk, gfp_t priority)
 	/* Send it off. */
 	if (tcp_transmit_skb(sk, skb, 0, priority))
 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED);
-
-	TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTRSTS);
 }
 
 /* Send a crossed SYN-ACK during socket establishment.
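
The tcp_output.c hunks move the OUTRSTS increment ahead of the skb allocation, so a reset attempt is counted even when alloc_skb() fails. The ordering in miniature; the counter and names below are illustrative:

    #include <stdio.h>
    #include <stdlib.h>

    /* Models the change: bump the counter before trying to allocate,
     * so attempts that fail allocation are still accounted for. */
    static long out_rsts;

    static void send_reset(int fail_alloc)
    {
        out_rsts++;                 /* count the attempt unconditionally */

        void *skb = fail_alloc ? NULL : malloc(64);
        if (!skb)
            return;                 /* previously this path lost the count */
        /* ... build and transmit the RST ... */
        free(skb);
    }

    int main(void)
    {
        send_reset(0);
        send_reset(1);
        printf("OUTRSTS=%ld\n", out_rsts);  /* 2, including the failure */
        return 0;
    }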
diff --git a/net/ipv4/tcp_recovery.c b/net/ipv4/tcp_recovery.c
index 4ecb38ae8504..d8acbd9f477a 100644
--- a/net/ipv4/tcp_recovery.c
+++ b/net/ipv4/tcp_recovery.c
@@ -12,7 +12,8 @@ static void tcp_rack_mark_skb_lost(struct sock *sk, struct sk_buff *skb)
 		/* Account for retransmits that are lost again */
 		TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
 		tp->retrans_out -= tcp_skb_pcount(skb);
-		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPLOSTRETRANSMIT);
+		NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPLOSTRETRANSMIT,
+			      tcp_skb_pcount(skb));
 	}
 }
 
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 363172527e43..80ce478c4851 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -3626,14 +3626,19 @@ restart:
 	INIT_LIST_HEAD(&del_list);
 	list_for_each_entry_safe(ifa, tmp, &idev->addr_list, if_list) {
 		struct rt6_info *rt = NULL;
+		bool keep;
 
 		addrconf_del_dad_work(ifa);
 
+		keep = keep_addr && (ifa->flags & IFA_F_PERMANENT) &&
+			!addr_is_local(&ifa->addr);
+		if (!keep)
+			list_move(&ifa->if_list, &del_list);
+
 		write_unlock_bh(&idev->lock);
 		spin_lock_bh(&ifa->lock);
 
-		if (keep_addr && (ifa->flags & IFA_F_PERMANENT) &&
-		    !addr_is_local(&ifa->addr)) {
+		if (keep) {
 			/* set state to skip the notifier below */
 			state = INET6_IFADDR_STATE_DEAD;
 			ifa->state = 0;
@@ -3645,8 +3650,6 @@ restart:
 		} else {
 			state = ifa->state;
 			ifa->state = INET6_IFADDR_STATE_DEAD;
-
-			list_move(&ifa->if_list, &del_list);
 		}
 
 		spin_unlock_bh(&ifa->lock);
diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c
index 309062f3debe..31762f76cdb5 100644
--- a/net/kcm/kcmsock.c
+++ b/net/kcm/kcmsock.c
@@ -1687,7 +1687,7 @@ static int kcm_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
 		struct kcm_attach info;
 
 		if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
-			err = -EFAULT;
+			return -EFAULT;
 
 		err = kcm_attach_ioctl(sock, &info);
 
@@ -1697,7 +1697,7 @@ static int kcm_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
 		struct kcm_unattach info;
 
 		if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
-			err = -EFAULT;
+			return -EFAULT;
 
 		err = kcm_unattach_ioctl(sock, &info);
 
@@ -1708,7 +1708,7 @@ static int kcm_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
 		struct socket *newsock = NULL;
 
 		if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
-			err = -EFAULT;
+			return -EFAULT;
 
 		err = kcm_clone(sock, &info, &newsock);
 
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index 8adab6335ced..e37d9554da7b 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -278,7 +278,57 @@ struct l2tp_session *l2tp_session_find(struct net *net, struct l2tp_tunnel *tunn
 }
 EXPORT_SYMBOL_GPL(l2tp_session_find);
 
-struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth)
+/* Like l2tp_session_find() but takes a reference on the returned session.
+ * Optionally calls session->ref() too if do_ref is true.
+ */
+struct l2tp_session *l2tp_session_get(struct net *net,
+				      struct l2tp_tunnel *tunnel,
+				      u32 session_id, bool do_ref)
+{
+	struct hlist_head *session_list;
+	struct l2tp_session *session;
+
+	if (!tunnel) {
+		struct l2tp_net *pn = l2tp_pernet(net);
+
+		session_list = l2tp_session_id_hash_2(pn, session_id);
+
+		rcu_read_lock_bh();
+		hlist_for_each_entry_rcu(session, session_list, global_hlist) {
+			if (session->session_id == session_id) {
+				l2tp_session_inc_refcount(session);
+				if (do_ref && session->ref)
+					session->ref(session);
+				rcu_read_unlock_bh();
+
+				return session;
+			}
+		}
+		rcu_read_unlock_bh();
+
+		return NULL;
+	}
+
+	session_list = l2tp_session_id_hash(tunnel, session_id);
+	read_lock_bh(&tunnel->hlist_lock);
+	hlist_for_each_entry(session, session_list, hlist) {
+		if (session->session_id == session_id) {
+			l2tp_session_inc_refcount(session);
+			if (do_ref && session->ref)
+				session->ref(session);
+			read_unlock_bh(&tunnel->hlist_lock);
+
+			return session;
+		}
+	}
+	read_unlock_bh(&tunnel->hlist_lock);
+
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(l2tp_session_get);
+
+struct l2tp_session *l2tp_session_get_nth(struct l2tp_tunnel *tunnel, int nth,
+					  bool do_ref)
 {
 	int hash;
 	struct l2tp_session *session;
@@ -288,6 +338,9 @@ struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth)
 	for (hash = 0; hash < L2TP_HASH_SIZE; hash++) {
 		hlist_for_each_entry(session, &tunnel->session_hlist[hash], hlist) {
 			if (++count > nth) {
+				l2tp_session_inc_refcount(session);
+				if (do_ref && session->ref)
+					session->ref(session);
 				read_unlock_bh(&tunnel->hlist_lock);
 				return session;
 			}
@@ -298,12 +351,13 @@ struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth)
 
 	return NULL;
 }
-EXPORT_SYMBOL_GPL(l2tp_session_find_nth);
+EXPORT_SYMBOL_GPL(l2tp_session_get_nth);
 
 /* Lookup a session by interface name.
  * This is very inefficient but is only used by management interfaces.
  */
-struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname)
+struct l2tp_session *l2tp_session_get_by_ifname(struct net *net, char *ifname,
+						bool do_ref)
 {
 	struct l2tp_net *pn = l2tp_pernet(net);
 	int hash;
@@ -313,7 +367,11 @@ struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname)
 	for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++) {
 		hlist_for_each_entry_rcu(session, &pn->l2tp_session_hlist[hash], global_hlist) {
 			if (!strcmp(session->ifname, ifname)) {
+				l2tp_session_inc_refcount(session);
+				if (do_ref && session->ref)
+					session->ref(session);
 				rcu_read_unlock_bh();
+
 				return session;
 			}
 		}
@@ -323,7 +381,49 @@ struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname)
 
 	return NULL;
 }
-EXPORT_SYMBOL_GPL(l2tp_session_find_by_ifname);
+EXPORT_SYMBOL_GPL(l2tp_session_get_by_ifname);
+
+static int l2tp_session_add_to_tunnel(struct l2tp_tunnel *tunnel,
+				      struct l2tp_session *session)
+{
+	struct l2tp_session *session_walk;
+	struct hlist_head *g_head;
+	struct hlist_head *head;
+	struct l2tp_net *pn;
+
+	head = l2tp_session_id_hash(tunnel, session->session_id);
+
+	write_lock_bh(&tunnel->hlist_lock);
+	hlist_for_each_entry(session_walk, head, hlist)
+		if (session_walk->session_id == session->session_id)
+			goto exist;
+
+	if (tunnel->version == L2TP_HDR_VER_3) {
+		pn = l2tp_pernet(tunnel->l2tp_net);
+		g_head = l2tp_session_id_hash_2(l2tp_pernet(tunnel->l2tp_net),
+						session->session_id);
+
+		spin_lock_bh(&pn->l2tp_session_hlist_lock);
+		hlist_for_each_entry(session_walk, g_head, global_hlist)
+			if (session_walk->session_id == session->session_id)
+				goto exist_glob;
+
+		hlist_add_head_rcu(&session->global_hlist, g_head);
+		spin_unlock_bh(&pn->l2tp_session_hlist_lock);
+	}
+
+	hlist_add_head(&session->hlist, head);
+	write_unlock_bh(&tunnel->hlist_lock);
+
+	return 0;
+
+exist_glob:
+	spin_unlock_bh(&pn->l2tp_session_hlist_lock);
+exist:
+	write_unlock_bh(&tunnel->hlist_lock);
+
+	return -EEXIST;
+}
 
 /* Lookup a tunnel by id
  */
@@ -633,6 +733,9 @@ discard:
  * a data (not control) frame before coming here. Fields up to the
  * session-id have already been parsed and ptr points to the data
  * after the session-id.
+ *
+ * session->ref() must have been called prior to l2tp_recv_common().
+ * session->deref() will be called automatically after skb is processed.
  */
 void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
 		      unsigned char *ptr, unsigned char *optr, u16 hdrflags,
@@ -642,14 +745,6 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
 	int offset;
 	u32 ns, nr;
 
-	/* The ref count is increased since we now hold a pointer to
-	 * the session. Take care to decrement the refcnt when exiting
-	 * this function from now on...
-	 */
-	l2tp_session_inc_refcount(session);
-	if (session->ref)
-		(*session->ref)(session);
-
 	/* Parse and check optional cookie */
 	if (session->peer_cookie_len > 0) {
 		if (memcmp(ptr, &session->peer_cookie[0], session->peer_cookie_len)) {
@@ -802,8 +897,6 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
 	/* Try to dequeue as many skbs from reorder_q as we can. */
 	l2tp_recv_dequeue(session);
 
-	l2tp_session_dec_refcount(session);
-
 	return;
 
 discard:
@@ -812,8 +905,6 @@ discard:
 
 	if (session->deref)
 		(*session->deref)(session);
-
-	l2tp_session_dec_refcount(session);
 }
 EXPORT_SYMBOL(l2tp_recv_common);
 
@@ -920,8 +1011,14 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb,
 	}
 
 	/* Find the session context */
-	session = l2tp_session_find(tunnel->l2tp_net, tunnel, session_id);
+	session = l2tp_session_get(tunnel->l2tp_net, tunnel, session_id, true);
 	if (!session || !session->recv_skb) {
+		if (session) {
+			if (session->deref)
+				session->deref(session);
+			l2tp_session_dec_refcount(session);
+		}
+
 		/* Not found? Pass to userspace to deal with */
 		l2tp_info(tunnel, L2TP_MSG_DATA,
 			  "%s: no session found (%u/%u). Passing up.\n",
@@ -930,6 +1027,7 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb,
 	}
 
 	l2tp_recv_common(session, skb, ptr, optr, hdrflags, length, payload_hook);
+	l2tp_session_dec_refcount(session);
 
 	return 0;
 
@@ -1738,6 +1836,7 @@ EXPORT_SYMBOL_GPL(l2tp_session_set_header_len);
 struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg)
 {
 	struct l2tp_session *session;
+	int err;
 
 	session = kzalloc(sizeof(struct l2tp_session) + priv_size, GFP_KERNEL);
 	if (session != NULL) {
@@ -1793,6 +1892,13 @@ struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunn
 
 		l2tp_session_set_header_len(session, tunnel->version);
 
+		err = l2tp_session_add_to_tunnel(tunnel, session);
+		if (err) {
+			kfree(session);
+
+			return ERR_PTR(err);
+		}
+
 		/* Bump the reference count. The session context is deleted
 		 * only when this drops to zero.
 		 */
@@ -1802,28 +1908,14 @@ struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunn
 		/* Ensure tunnel socket isn't deleted */
 		sock_hold(tunnel->sock);
 
-		/* Add session to the tunnel's hash list */
-		write_lock_bh(&tunnel->hlist_lock);
-		hlist_add_head(&session->hlist,
-			       l2tp_session_id_hash(tunnel, session_id));
-		write_unlock_bh(&tunnel->hlist_lock);
-
-		/* And to the global session list if L2TPv3 */
-		if (tunnel->version != L2TP_HDR_VER_2) {
-			struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net);
-
-			spin_lock_bh(&pn->l2tp_session_hlist_lock);
-			hlist_add_head_rcu(&session->global_hlist,
-					   l2tp_session_id_hash_2(pn, session_id));
-			spin_unlock_bh(&pn->l2tp_session_hlist_lock);
-		}
-
 		/* Ignore management session in session count value */
 		if (session->session_id != 0)
 			atomic_inc(&l2tp_session_count);
+
+		return session;
 	}
 
-	return session;
+	return ERR_PTR(-ENOMEM);
 }
 EXPORT_SYMBOL_GPL(l2tp_session_create);
 
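
The l2tp_core.c changes replace find-style lookups with _get() variants that take the reference inside the same locked region that finds the object, and every caller now pairs the lookup with l2tp_session_dec_refcount(). The discipline reduced to a single object, with a plain counter standing in for the kernel's atomic refcount and locking:

    #include <stdio.h>

    struct session {
        int refcount;
        int id;
    };

    static struct session table_entry = { .refcount = 1, .id = 42 };

    static struct session *session_get(int id)
    {
        if (table_entry.id != id)
            return NULL;
        table_entry.refcount++; /* taken under the table lock in the kernel */
        return &table_entry;
    }

    static void session_put(struct session *s)
    {
        if (--s->refcount == 0)
            printf("session %d freed\n", s->id);
    }

    int main(void)
    {
        struct session *s = session_get(42);

        if (s) {
            printf("using session %d (refs=%d)\n", s->id, s->refcount);
            session_put(s);         /* caller's matching put */
        }
        session_put(&table_entry);  /* owner drops the last reference */
        return 0;
    }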
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
index aebf281d09ee..8ce7818c7a9d 100644
--- a/net/l2tp/l2tp_core.h
+++ b/net/l2tp/l2tp_core.h
@@ -230,11 +230,16 @@ out:
 	return tunnel;
 }
 
+struct l2tp_session *l2tp_session_get(struct net *net,
+				      struct l2tp_tunnel *tunnel,
+				      u32 session_id, bool do_ref);
 struct l2tp_session *l2tp_session_find(struct net *net,
 				       struct l2tp_tunnel *tunnel,
 				       u32 session_id);
-struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth);
-struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname);
+struct l2tp_session *l2tp_session_get_nth(struct l2tp_tunnel *tunnel, int nth,
+					  bool do_ref);
+struct l2tp_session *l2tp_session_get_by_ifname(struct net *net, char *ifname,
+						bool do_ref);
 struct l2tp_tunnel *l2tp_tunnel_find(struct net *net, u32 tunnel_id);
 struct l2tp_tunnel *l2tp_tunnel_find_nth(struct net *net, int nth);
 
diff --git a/net/l2tp/l2tp_debugfs.c b/net/l2tp/l2tp_debugfs.c
index 2d6760a2ae34..d100aed3d06f 100644
--- a/net/l2tp/l2tp_debugfs.c
+++ b/net/l2tp/l2tp_debugfs.c
@@ -53,7 +53,7 @@ static void l2tp_dfs_next_tunnel(struct l2tp_dfs_seq_data *pd)
 
 static void l2tp_dfs_next_session(struct l2tp_dfs_seq_data *pd)
 {
-	pd->session = l2tp_session_find_nth(pd->tunnel, pd->session_idx);
+	pd->session = l2tp_session_get_nth(pd->tunnel, pd->session_idx, true);
 	pd->session_idx++;
 
 	if (pd->session == NULL) {
@@ -238,10 +238,14 @@ static int l2tp_dfs_seq_show(struct seq_file *m, void *v)
 	}
 
 	/* Show the tunnel or session context */
-	if (pd->session == NULL)
+	if (!pd->session) {
 		l2tp_dfs_seq_tunnel_show(m, pd->tunnel);
-	else
+	} else {
 		l2tp_dfs_seq_session_show(m, pd->session);
+		if (pd->session->deref)
+			pd->session->deref(pd->session);
+		l2tp_session_dec_refcount(pd->session);
+	}
 
 out:
 	return 0;
diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c
index 8bf18a5f66e0..6fd41d7afe1e 100644
--- a/net/l2tp/l2tp_eth.c
+++ b/net/l2tp/l2tp_eth.c
@@ -221,12 +221,6 @@ static int l2tp_eth_create(struct net *net, u32 tunnel_id, u32 session_id, u32 p
 		goto out;
 	}
 
-	session = l2tp_session_find(net, tunnel, session_id);
-	if (session) {
-		rc = -EEXIST;
-		goto out;
-	}
-
 	if (cfg->ifname) {
 		dev = dev_get_by_name(net, cfg->ifname);
 		if (dev) {
@@ -240,8 +234,8 @@ static int l2tp_eth_create(struct net *net, u32 tunnel_id, u32 session_id, u32 p
 
 	session = l2tp_session_create(sizeof(*spriv), tunnel, session_id,
 				      peer_session_id, cfg);
-	if (!session) {
-		rc = -ENOMEM;
+	if (IS_ERR(session)) {
+		rc = PTR_ERR(session);
 		goto out;
 	}
 
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index d25038cfd64e..4d322c1b7233 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -143,19 +143,19 @@ static int l2tp_ip_recv(struct sk_buff *skb)
 	}
 
 	/* Ok, this is a data packet. Lookup the session. */
-	session = l2tp_session_find(net, NULL, session_id);
-	if (session == NULL)
+	session = l2tp_session_get(net, NULL, session_id, true);
+	if (!session)
 		goto discard;
 
 	tunnel = session->tunnel;
-	if (tunnel == NULL)
-		goto discard;
+	if (!tunnel)
+		goto discard_sess;
 
 	/* Trace packet contents, if enabled */
 	if (tunnel->debug & L2TP_MSG_DATA) {
 		length = min(32u, skb->len);
 		if (!pskb_may_pull(skb, length))
-			goto discard;
+			goto discard_sess;
 
 		/* Point to L2TP header */
 		optr = ptr = skb->data;
@@ -165,6 +165,7 @@ static int l2tp_ip_recv(struct sk_buff *skb)
 	}
 
 	l2tp_recv_common(session, skb, ptr, optr, 0, skb->len, tunnel->recv_payload_hook);
+	l2tp_session_dec_refcount(session);
 
 	return 0;
 
@@ -178,9 +179,10 @@ pass_up:
 
 	tunnel_id = ntohl(*(__be32 *) &skb->data[4]);
 	tunnel = l2tp_tunnel_find(net, tunnel_id);
-	if (tunnel != NULL)
+	if (tunnel) {
 		sk = tunnel->sock;
-	else {
+		sock_hold(sk);
+	} else {
 		struct iphdr *iph = (struct iphdr *) skb_network_header(skb);
 
 		read_lock_bh(&l2tp_ip_lock);
@@ -202,6 +204,12 @@ pass_up:
 
 	return sk_receive_skb(sk, skb, 1);
 
+discard_sess:
+	if (session->deref)
+		session->deref(session);
+	l2tp_session_dec_refcount(session);
+	goto discard;
+
 discard_put:
 	sock_put(sk);
 
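
l2tp_ip_recv() now owns a session reference as soon as the lookup succeeds, so its later failure exits branch to a new discard_sess label that drops the reference before joining the common discard path. A compact model of that goto ladder, with malloc/free standing in for ref get/put:

    #include <stdio.h>
    #include <stdlib.h>

    /* Once a lookup has taken a resource, every later failure must
     * branch to a label that releases it before the common exit. */
    static int process(int fail_late)
    {
        char *session = malloc(16); /* stands in for the ref'd session */
        if (!session)
            goto discard;

        if (fail_late)
            goto discard_sess;      /* NOT plain discard: would leak */

        free(session);
        return 0;

    discard_sess:
        free(session);              /* drop the reference first */
    discard:
        fprintf(stderr, "packet discarded\n");
        return -1;
    }

    int main(void)
    {
        process(1);
        return 0;
    }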
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
index a4abcbc4c09a..88b397c30d86 100644
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -156,19 +156,19 @@ static int l2tp_ip6_recv(struct sk_buff *skb)
 	}
 
 	/* Ok, this is a data packet. Lookup the session. */
-	session = l2tp_session_find(net, NULL, session_id);
-	if (session == NULL)
+	session = l2tp_session_get(net, NULL, session_id, true);
+	if (!session)
 		goto discard;
 
 	tunnel = session->tunnel;
-	if (tunnel == NULL)
-		goto discard;
+	if (!tunnel)
+		goto discard_sess;
 
 	/* Trace packet contents, if enabled */
 	if (tunnel->debug & L2TP_MSG_DATA) {
 		length = min(32u, skb->len);
 		if (!pskb_may_pull(skb, length))
-			goto discard;
+			goto discard_sess;
 
 		/* Point to L2TP header */
 		optr = ptr = skb->data;
@@ -179,6 +179,8 @@ static int l2tp_ip6_recv(struct sk_buff *skb)
 
 	l2tp_recv_common(session, skb, ptr, optr, 0, skb->len,
 			 tunnel->recv_payload_hook);
+	l2tp_session_dec_refcount(session);
+
 	return 0;
 
 pass_up:
@@ -191,9 +193,10 @@ pass_up:
 
 	tunnel_id = ntohl(*(__be32 *) &skb->data[4]);
 	tunnel = l2tp_tunnel_find(net, tunnel_id);
-	if (tunnel != NULL)
+	if (tunnel) {
 		sk = tunnel->sock;
-	else {
+		sock_hold(sk);
+	} else {
 		struct ipv6hdr *iph = ipv6_hdr(skb);
 
 		read_lock_bh(&l2tp_ip6_lock);
@@ -215,6 +218,12 @@ pass_up:
 
 	return sk_receive_skb(sk, skb, 1);
 
+discard_sess:
+	if (session->deref)
+		session->deref(session);
+	l2tp_session_dec_refcount(session);
+	goto discard;
+
 discard_put:
 	sock_put(sk);
 
diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
index 3620fba31786..7e3e669baac4 100644
--- a/net/l2tp/l2tp_netlink.c
+++ b/net/l2tp/l2tp_netlink.c
@@ -48,7 +48,8 @@ static int l2tp_nl_session_send(struct sk_buff *skb, u32 portid, u32 seq,
 /* Accessed under genl lock */
 static const struct l2tp_nl_cmd_ops *l2tp_nl_cmd_ops[__L2TP_PWTYPE_MAX];
 
-static struct l2tp_session *l2tp_nl_session_find(struct genl_info *info)
+static struct l2tp_session *l2tp_nl_session_get(struct genl_info *info,
+						bool do_ref)
 {
 	u32 tunnel_id;
 	u32 session_id;
@@ -59,14 +60,15 @@ static struct l2tp_session *l2tp_nl_session_find(struct genl_info *info)
 
 	if (info->attrs[L2TP_ATTR_IFNAME]) {
 		ifname = nla_data(info->attrs[L2TP_ATTR_IFNAME]);
-		session = l2tp_session_find_by_ifname(net, ifname);
+		session = l2tp_session_get_by_ifname(net, ifname, do_ref);
 	} else if ((info->attrs[L2TP_ATTR_SESSION_ID]) &&
 		   (info->attrs[L2TP_ATTR_CONN_ID])) {
 		tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]);
 		session_id = nla_get_u32(info->attrs[L2TP_ATTR_SESSION_ID]);
 		tunnel = l2tp_tunnel_find(net, tunnel_id);
 		if (tunnel)
-			session = l2tp_session_find(net, tunnel, session_id);
+			session = l2tp_session_get(net, tunnel, session_id,
						   do_ref);
 	}
 
 	return session;
@@ -642,10 +644,12 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf
 					   session_id, peer_session_id, &cfg);
 
 	if (ret >= 0) {
-		session = l2tp_session_find(net, tunnel, session_id);
-		if (session)
+		session = l2tp_session_get(net, tunnel, session_id, false);
+		if (session) {
 			ret = l2tp_session_notify(&l2tp_nl_family, info, session,
 						  L2TP_CMD_SESSION_CREATE);
+			l2tp_session_dec_refcount(session);
+		}
 	}
 
 out:
@@ -658,7 +662,7 @@ static int l2tp_nl_cmd_session_delete(struct sk_buff *skb, struct genl_info *inf
 	struct l2tp_session *session;
 	u16 pw_type;
 
-	session = l2tp_nl_session_find(info);
+	session = l2tp_nl_session_get(info, true);
 	if (session == NULL) {
 		ret = -ENODEV;
 		goto out;
@@ -672,6 +676,10 @@ static int l2tp_nl_cmd_session_delete(struct sk_buff *skb, struct genl_info *inf
 	if (l2tp_nl_cmd_ops[pw_type] && l2tp_nl_cmd_ops[pw_type]->session_delete)
 		ret = (*l2tp_nl_cmd_ops[pw_type]->session_delete)(session);
 
+	if (session->deref)
+		session->deref(session);
+	l2tp_session_dec_refcount(session);
+
 out:
 	return ret;
 }
@@ -681,7 +689,7 @@ static int l2tp_nl_cmd_session_modify(struct sk_buff *skb, struct genl_info *inf
 	int ret = 0;
 	struct l2tp_session *session;
 
-	session = l2tp_nl_session_find(info);
+	session = l2tp_nl_session_get(info, false);
 	if (session == NULL) {
 		ret = -ENODEV;
 		goto out;
@@ -716,6 +724,8 @@ static int l2tp_nl_cmd_session_modify(struct sk_buff *skb, struct genl_info *inf
 	ret = l2tp_session_notify(&l2tp_nl_family, info,
 				  session, L2TP_CMD_SESSION_MODIFY);
 
+	l2tp_session_dec_refcount(session);
+
 out:
 	return ret;
 }
@@ -811,29 +821,34 @@ static int l2tp_nl_cmd_session_get(struct sk_buff *skb, struct genl_info *info)
 	struct sk_buff *msg;
 	int ret;
 
-	session = l2tp_nl_session_find(info);
+	session = l2tp_nl_session_get(info, false);
 	if (session == NULL) {
 		ret = -ENODEV;
-		goto out;
+		goto err;
 	}
 
 	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
 	if (!msg) {
 		ret = -ENOMEM;
-		goto out;
+		goto err_ref;
 	}
 
 	ret = l2tp_nl_session_send(msg, info->snd_portid, info->snd_seq,
 				   0, session, L2TP_CMD_SESSION_GET);
 	if (ret < 0)
-		goto err_out;
+		goto err_ref_msg;
 
-	return genlmsg_unicast(genl_info_net(info), msg, info->snd_portid);
+	ret = genlmsg_unicast(genl_info_net(info), msg, info->snd_portid);
 
-err_out:
-	nlmsg_free(msg);
+	l2tp_session_dec_refcount(session);
 
-out:
+	return ret;
+
+err_ref_msg:
+	nlmsg_free(msg);
+err_ref:
+	l2tp_session_dec_refcount(session);
+err:
 	return ret;
 }
 
@@ -852,7 +867,7 @@ static int l2tp_nl_cmd_session_dump(struct sk_buff *skb, struct netlink_callback
 		goto out;
 	}
 
-	session = l2tp_session_find_nth(tunnel, si);
+	session = l2tp_session_get_nth(tunnel, si, false);
 	if (session == NULL) {
 		ti++;
 		tunnel = NULL;
@@ -862,8 +877,11 @@ static int l2tp_nl_cmd_session_dump(struct sk_buff *skb, struct netlink_callback
 
 	if (l2tp_nl_session_send(skb, NETLINK_CB(cb->skb).portid,
 				 cb->nlh->nlmsg_seq, NLM_F_MULTI,
-				 session, L2TP_CMD_SESSION_GET) < 0)
+				 session, L2TP_CMD_SESSION_GET) < 0) {
+		l2tp_session_dec_refcount(session);
 		break;
+	}
+	l2tp_session_dec_refcount(session);
 
 	si++;
 }
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index 36cc56fd0418..32ea0f3d868c 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -450,6 +450,10 @@ static void pppol2tp_session_close(struct l2tp_session *session)
 static void pppol2tp_session_destruct(struct sock *sk)
 {
 	struct l2tp_session *session = sk->sk_user_data;
+
+	skb_queue_purge(&sk->sk_receive_queue);
+	skb_queue_purge(&sk->sk_write_queue);
+
 	if (session) {
 		sk->sk_user_data = NULL;
 		BUG_ON(session->magic != L2TP_SESSION_MAGIC);
@@ -488,9 +492,6 @@ static int pppol2tp_release(struct socket *sock)
 		l2tp_session_queue_purge(session);
 		sock_put(sk);
 	}
-	skb_queue_purge(&sk->sk_receive_queue);
-	skb_queue_purge(&sk->sk_write_queue);
-
 	release_sock(sk);
 
 	/* This will delete the session context via
@@ -582,6 +583,7 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
 	int error = 0;
 	u32 tunnel_id, peer_tunnel_id;
 	u32 session_id, peer_session_id;
+	bool drop_refcnt = false;
 	int ver = 2;
 	int fd;
 
@@ -683,36 +685,36 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
 	if (tunnel->peer_tunnel_id == 0)
 		tunnel->peer_tunnel_id = peer_tunnel_id;
 
-	/* Create session if it doesn't already exist. We handle the
-	 * case where a session was previously created by the netlink
-	 * interface by checking that the session doesn't already have
-	 * a socket and its tunnel socket are what we expect. If any
-	 * of those checks fail, return EEXIST to the caller.
-	 */
-	session = l2tp_session_find(sock_net(sk), tunnel, session_id);
-	if (session == NULL) {
-		/* Default MTU must allow space for UDP/L2TP/PPP
-		 * headers.
+	session = l2tp_session_get(sock_net(sk), tunnel, session_id, false);
+	if (session) {
+		drop_refcnt = true;
+		ps = l2tp_session_priv(session);
+
+		/* Using a pre-existing session is fine as long as it hasn't
+		 * been connected yet.
 		 */
-		cfg.mtu = cfg.mru = 1500 - PPPOL2TP_HEADER_OVERHEAD;
+		if (ps->sock) {
+			error = -EEXIST;
+			goto end;
+		}
 
-		/* Allocate and initialize a new session context. */
-		session = l2tp_session_create(sizeof(struct pppol2tp_session),
-					      tunnel, session_id,
-					      peer_session_id, &cfg);
-		if (session == NULL) {
-			error = -ENOMEM;
+		/* consistency checks */
+		if (ps->tunnel_sock != tunnel->sock) {
+			error = -EEXIST;
 			goto end;
 		}
 	} else {
-		ps = l2tp_session_priv(session);
-		error = -EEXIST;
-		if (ps->sock != NULL)
-			goto end;
+		/* Default MTU must allow space for UDP/L2TP/PPP headers */
+		cfg.mtu = 1500 - PPPOL2TP_HEADER_OVERHEAD;
+		cfg.mru = cfg.mtu;
 
-		/* consistency checks */
-		if (ps->tunnel_sock != tunnel->sock)
+		session = l2tp_session_create(sizeof(struct pppol2tp_session),
+					      tunnel, session_id,
+					      peer_session_id, &cfg);
+		if (IS_ERR(session)) {
+			error = PTR_ERR(session);
 			goto end;
+		}
 	}
 
 	/* Associate session with its PPPoL2TP socket */
@@ -777,6 +779,8 @@ out_no_ppp:
 		   session->name);
 
 end:
+	if (drop_refcnt)
+		l2tp_session_dec_refcount(session);
 	release_sock(sk);
 
 	return error;
@@ -804,12 +808,6 @@ static int pppol2tp_session_create(struct net *net, u32 tunnel_id, u32 session_i
 	if (tunnel->sock == NULL)
 		goto out;
 
-	/* Check that this session doesn't already exist */
-	error = -EEXIST;
-	session = l2tp_session_find(net, tunnel, session_id);
-	if (session != NULL)
-		goto out;
-
 	/* Default MTU values. */
 	if (cfg->mtu == 0)
 		cfg->mtu = 1500 - PPPOL2TP_HEADER_OVERHEAD;
@@ -817,12 +815,13 @@ static int pppol2tp_session_create(struct net *net, u32 tunnel_id, u32 session_i
 		cfg->mru = cfg->mtu;
 
 	/* Allocate and initialize a new session context. */
-	error = -ENOMEM;
 	session = l2tp_session_create(sizeof(struct pppol2tp_session),
 				      tunnel, session_id,
 				      peer_session_id, cfg);
-	if (session == NULL)
+	if (IS_ERR(session)) {
+		error = PTR_ERR(session);
 		goto out;
+	}
 
 	ps = l2tp_session_priv(session);
 	ps->tunnel_sock = tunnel->sock;
@@ -1140,11 +1139,18 @@ static int pppol2tp_tunnel_ioctl(struct l2tp_tunnel *tunnel,
 		if (stats.session_id != 0) {
 			/* resend to session ioctl handler */
 			struct l2tp_session *session =
-				l2tp_session_find(sock_net(sk), tunnel, stats.session_id);
-			if (session != NULL)
-				err = pppol2tp_session_ioctl(session, cmd, arg);
-			else
+				l2tp_session_get(sock_net(sk), tunnel,
+						 stats.session_id, true);
+
+			if (session) {
+				err = pppol2tp_session_ioctl(session, cmd,
+							     arg);
+				if (session->deref)
+					session->deref(session);
+				l2tp_session_dec_refcount(session);
+			} else {
 				err = -EBADR;
+			}
 			break;
 		}
 #ifdef CONFIG_XFRM
@@ -1377,8 +1383,6 @@ static int pppol2tp_setsockopt(struct socket *sock, int level, int optname,
 	} else
 		err = pppol2tp_session_setsockopt(sk, session, optname, val);
 
-	err = 0;
-
 end_put_sess:
 	sock_put(sk);
 end:
@@ -1501,8 +1505,13 @@ static int pppol2tp_getsockopt(struct socket *sock, int level, int optname,
 
 		err = pppol2tp_tunnel_getsockopt(sk, tunnel, optname, &val);
 		sock_put(ps->tunnel_sock);
-	} else
+		if (err)
+			goto end_put_sess;
+	} else {
 		err = pppol2tp_session_getsockopt(sk, session, optname, &val);
+		if (err)
+			goto end_put_sess;
+	}
 
 	err = -EFAULT;
 	if (put_user(len, optlen))
@@ -1554,7 +1563,7 @@ static void pppol2tp_next_tunnel(struct net *net, struct pppol2tp_seq_data *pd)
 
 static void pppol2tp_next_session(struct net *net, struct pppol2tp_seq_data *pd)
 {
-	pd->session = l2tp_session_find_nth(pd->tunnel, pd->session_idx);
+	pd->session = l2tp_session_get_nth(pd->tunnel, pd->session_idx, true);
 	pd->session_idx++;
 
 	if (pd->session == NULL) {
@@ -1681,10 +1690,14 @@ static int pppol2tp_seq_show(struct seq_file *m, void *v)
 
 	/* Show the tunnel or session context.
 	 */
-	if (pd->session == NULL)
+	if (!pd->session) {
 		pppol2tp_seq_tunnel_show(m, pd->tunnel);
-	else
+	} else {
 		pppol2tp_seq_session_show(m, pd->session);
+		if (pd->session->deref)
+			pd->session->deref(pd->session);
+		l2tp_session_dec_refcount(pd->session);
+	}
 
 out:
 	return 0;
@@ -1843,4 +1856,4 @@ MODULE_DESCRIPTION("PPP over L2TP over UDP");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(PPPOL2TP_DRV_VERSION);
 MODULE_ALIAS_NET_PF_PROTO(PF_PPPOX, PX_PROTO_OL2TP);
-MODULE_ALIAS_L2TP_PWTYPE(11);
+MODULE_ALIAS_L2TP_PWTYPE(7);
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 40813dd3301c..5bb0c5012819 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -718,7 +718,8 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
 	ieee80211_recalc_ps(local);
 
 	if (sdata->vif.type == NL80211_IFTYPE_MONITOR ||
-	    sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
+	    sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
+	    local->ops->wake_tx_queue) {
 		/* XXX: for AP_VLAN, actually track AP queues */
 		netif_tx_start_all_queues(dev);
 	} else if (dev) {
diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c
index da9df2d56e66..22fc32143e9c 100644
--- a/net/netfilter/nf_conntrack_ecache.c
+++ b/net/netfilter/nf_conntrack_ecache.c
@@ -290,6 +290,7 @@ void nf_conntrack_unregister_notifier(struct net *net,
 	BUG_ON(notify != new);
 	RCU_INIT_POINTER(net->ct.nf_conntrack_event_cb, NULL);
 	mutex_unlock(&nf_ct_ecache_mutex);
+	/* synchronize_rcu() is called from ctnetlink_exit. */
 }
 EXPORT_SYMBOL_GPL(nf_conntrack_unregister_notifier);
 
@@ -326,6 +327,7 @@ void nf_ct_expect_unregister_notifier(struct net *net,
 	BUG_ON(notify != new);
 	RCU_INIT_POINTER(net->ct.nf_expect_event_cb, NULL);
 	mutex_unlock(&nf_ct_ecache_mutex);
+	/* synchronize_rcu() is called from ctnetlink_exit. */
 }
 EXPORT_SYMBOL_GPL(nf_ct_expect_unregister_notifier);
 
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
index 4b2e1fb28bb4..d80073037856 100644
--- a/net/netfilter/nf_conntrack_expect.c
+++ b/net/netfilter/nf_conntrack_expect.c
@@ -57,7 +57,7 @@ void nf_ct_unlink_expect_report(struct nf_conntrack_expect *exp,
 	hlist_del_rcu(&exp->hnode);
 	net->ct.expect_count--;
 
-	hlist_del(&exp->lnode);
+	hlist_del_rcu(&exp->lnode);
 	master_help->expecting[exp->class]--;
 
 	nf_ct_expect_event_report(IPEXP_DESTROY, exp, portid, report);
@@ -363,7 +363,7 @@ static void nf_ct_expect_insert(struct nf_conntrack_expect *exp)
 	/* two references : one for hash insert, one for the timer */
 	atomic_add(2, &exp->use);
 
-	hlist_add_head(&exp->lnode, &master_help->expectations);
+	hlist_add_head_rcu(&exp->lnode, &master_help->expectations);
 	master_help->expecting[exp->class]++;
 
 	hlist_add_head_rcu(&exp->hnode, &nf_ct_expect_hash[h]);
diff --git a/net/netfilter/nf_conntrack_extend.c b/net/netfilter/nf_conntrack_extend.c
index 02bcf00c2492..008299b7f78f 100644
--- a/net/netfilter/nf_conntrack_extend.c
+++ b/net/netfilter/nf_conntrack_extend.c
@@ -53,7 +53,11 @@ nf_ct_ext_create(struct nf_ct_ext **ext, enum nf_ct_ext_id id,
 
 	rcu_read_lock();
 	t = rcu_dereference(nf_ct_ext_types[id]);
-	BUG_ON(t == NULL);
+	if (!t) {
+		rcu_read_unlock();
+		return NULL;
+	}
+
 	off = ALIGN(sizeof(struct nf_ct_ext), t->align);
 	len = off + t->len + var_alloc_len;
 	alloc_size = t->alloc_size + var_alloc_len;
@@ -88,7 +92,10 @@ void *__nf_ct_ext_add_length(struct nf_conn *ct, enum nf_ct_ext_id id,
 
 	rcu_read_lock();
 	t = rcu_dereference(nf_ct_ext_types[id]);
-	BUG_ON(t == NULL);
+	if (!t) {
+		rcu_read_unlock();
+		return NULL;
+	}
 
 	newoff = ALIGN(old->len, t->align);
 	newlen = newoff + t->len + var_alloc_len;
@@ -175,6 +182,6 @@ void nf_ct_extend_unregister(struct nf_ct_ext_type *type)
 	RCU_INIT_POINTER(nf_ct_ext_types[type->id], NULL);
 	update_alloc_size(type);
 	mutex_unlock(&nf_ct_ext_type_mutex);
-	rcu_barrier(); /* Wait for completion of call_rcu()'s */
+	synchronize_rcu();
 }
 EXPORT_SYMBOL_GPL(nf_ct_extend_unregister);
diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
index 6dc44d9b4190..4eeb3418366a 100644
--- a/net/netfilter/nf_conntrack_helper.c
+++ b/net/netfilter/nf_conntrack_helper.c
@@ -158,16 +158,25 @@ nf_conntrack_helper_try_module_get(const char *name, u16 l3num, u8 protonum)
 {
 	struct nf_conntrack_helper *h;
 
+	rcu_read_lock();
+
 	h = __nf_conntrack_helper_find(name, l3num, protonum);
 #ifdef CONFIG_MODULES
 	if (h == NULL) {
-		if (request_module("nfct-helper-%s", name) == 0)
+		rcu_read_unlock();
+		if (request_module("nfct-helper-%s", name) == 0) {
+			rcu_read_lock();
 			h = __nf_conntrack_helper_find(name, l3num, protonum);
+		} else {
+			return h;
+		}
 	}
 #endif
 	if (h != NULL && !try_module_get(h->me))
 		h = NULL;
 
+	rcu_read_unlock();
+
 	return h;
 }
 EXPORT_SYMBOL_GPL(nf_conntrack_helper_try_module_get);
@@ -311,38 +320,36 @@ void nf_ct_helper_expectfn_unregister(struct nf_ct_helper_expectfn *n)
 }
 EXPORT_SYMBOL_GPL(nf_ct_helper_expectfn_unregister);
 
+/* Caller should hold the rcu lock */
 struct nf_ct_helper_expectfn *
 nf_ct_helper_expectfn_find_by_name(const char *name)
 {
 	struct nf_ct_helper_expectfn *cur;
 	bool found = false;
 
-	rcu_read_lock();
 	list_for_each_entry_rcu(cur, &nf_ct_helper_expectfn_list, head) {
 		if (!strcmp(cur->name, name)) {
 			found = true;
 			break;
 		}
 	}
-	rcu_read_unlock();
 	return found ? cur : NULL;
 }
 EXPORT_SYMBOL_GPL(nf_ct_helper_expectfn_find_by_name);
 
+/* Caller should hold the rcu lock */
 struct nf_ct_helper_expectfn *
 nf_ct_helper_expectfn_find_by_symbol(const void *symbol)
 {
 	struct nf_ct_helper_expectfn *cur;
 	bool found = false;
 
-	rcu_read_lock();
 	list_for_each_entry_rcu(cur, &nf_ct_helper_expectfn_list, head) {
 		if (cur->expectfn == symbol) {
 			found = true;
 			break;
 		}
 	}
-	rcu_read_unlock();
 	return found ? cur : NULL;
 }
 EXPORT_SYMBOL_GPL(nf_ct_helper_expectfn_find_by_symbol);
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 6806b5e73567..dc7dfd68fafe 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -1488,11 +1488,16 @@ static int ctnetlink_change_helper(struct nf_conn *ct,
 		 * treat the second attempt as a no-op instead of returning
 		 * an error.
 		 */
-		if (help && help->helper &&
-		    !strcmp(help->helper->name, helpname))
-			return 0;
-		else
-			return -EBUSY;
+		err = -EBUSY;
+		if (help) {
+			rcu_read_lock();
+			helper = rcu_dereference(help->helper);
+			if (helper && !strcmp(helper->name, helpname))
+				err = 0;
+			rcu_read_unlock();
+		}
+
+		return err;
 	}
 
 	if (!strcmp(helpname, "")) {
@@ -1929,9 +1934,9 @@ static int ctnetlink_new_conntrack(struct net *net, struct sock *ctnl,
 
 	err = 0;
 	if (test_bit(IPS_EXPECTED_BIT, &ct->status))
-		events = IPCT_RELATED;
+		events = 1 << IPCT_RELATED;
 	else
-		events = IPCT_NEW;
+		events = 1 << IPCT_NEW;
 
 	if (cda[CTA_LABELS] &&
 	    ctnetlink_attach_labels(ct, cda) == 0)
@@ -2675,8 +2680,8 @@ ctnetlink_exp_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
 	last = (struct nf_conntrack_expect *)cb->args[1];
 	for (; cb->args[0] < nf_ct_expect_hsize; cb->args[0]++) {
 restart:
-		hlist_for_each_entry(exp, &nf_ct_expect_hash[cb->args[0]],
-				     hnode) {
+		hlist_for_each_entry_rcu(exp, &nf_ct_expect_hash[cb->args[0]],
+					 hnode) {
 			if (l3proto && exp->tuple.src.l3num != l3proto)
 				continue;
 
@@ -2727,7 +2732,7 @@ ctnetlink_exp_ct_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
 	rcu_read_lock();
 	last = (struct nf_conntrack_expect *)cb->args[1];
 restart:
-	hlist_for_each_entry(exp, &help->expectations, lnode) {
+	hlist_for_each_entry_rcu(exp, &help->expectations, lnode) {
 		if (l3proto && exp->tuple.src.l3num != l3proto)
 			continue;
 		if (cb->args[1]) {
@@ -2789,6 +2794,12 @@ static int ctnetlink_dump_exp_ct(struct net *net, struct sock *ctnl,
 		return -ENOENT;
 
 	ct = nf_ct_tuplehash_to_ctrack(h);
+	/* No expectation linked to this connection tracking. */
+	if (!nfct_help(ct)) {
+		nf_ct_put(ct);
+		return 0;
+	}
+
 	c.data = ct;
 
 	err = netlink_dump_start(ctnl, skb, nlh, &c);
@@ -3133,23 +3144,27 @@ ctnetlink_create_expect(struct net *net,
 		return -ENOENT;
 	ct = nf_ct_tuplehash_to_ctrack(h);
 
+	rcu_read_lock();
 	if (cda[CTA_EXPECT_HELP_NAME]) {
 		const char *helpname = nla_data(cda[CTA_EXPECT_HELP_NAME]);
 
 		helper = __nf_conntrack_helper_find(helpname, u3,
 						    nf_ct_protonum(ct));
 		if (helper == NULL) {
+			rcu_read_unlock();
 #ifdef CONFIG_MODULES
 			if (request_module("nfct-helper-%s", helpname) < 0) {
 				err = -EOPNOTSUPP;
 				goto err_ct;
 			}
+			rcu_read_lock();
 			helper = __nf_conntrack_helper_find(helpname, u3,
 							    nf_ct_protonum(ct));
 			if (helper) {
 				err = -EAGAIN;
-				goto err_ct;
+				goto err_rcu;
 			}
+			rcu_read_unlock();
 #endif
 			err = -EOPNOTSUPP;
 			goto err_ct;
@@ -3159,11 +3174,13 @@ ctnetlink_create_expect(struct net *net,
 	exp = ctnetlink_alloc_expect(cda, ct, helper, &tuple, &mask);
 	if (IS_ERR(exp)) {
 		err = PTR_ERR(exp);
-		goto err_ct;
+		goto err_rcu;
 	}
 
 	err = nf_ct_expect_related_report(exp, portid, report);
 	nf_ct_expect_put(exp);
+err_rcu:
+	rcu_read_unlock();
 err_ct:
 	nf_ct_put(ct);
 	return err;
@@ -3442,6 +3459,7 @@ static void __exit ctnetlink_exit(void)
 #ifdef CONFIG_NETFILTER_NETLINK_GLUE_CT
 	RCU_INIT_POINTER(nfnl_ct_hook, NULL);
 #endif
+	synchronize_rcu();
 }
 
 module_init(ctnetlink_init);
diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c
index 94b14c5a8b17..82802e4a6640 100644
--- a/net/netfilter/nf_nat_core.c
+++ b/net/netfilter/nf_nat_core.c
@@ -903,6 +903,8 @@ static void __exit nf_nat_cleanup(void)
 #ifdef CONFIG_XFRM
 	RCU_INIT_POINTER(nf_nat_decode_session_hook, NULL);
 #endif
+	synchronize_rcu();
+
 	for (i = 0; i < NFPROTO_NUMPROTO; i++)
 		kfree(nf_nat_l4protos[i]);
 
diff --git a/net/netfilter/nf_nat_redirect.c b/net/netfilter/nf_nat_redirect.c
index d43869879fcf..86067560a318 100644
--- a/net/netfilter/nf_nat_redirect.c
+++ b/net/netfilter/nf_nat_redirect.c
@@ -101,11 +101,13 @@ nf_nat_redirect_ipv6(struct sk_buff *skb, const struct nf_nat_range *range,
 	rcu_read_lock();
 	idev = __in6_dev_get(skb->dev);
 	if (idev != NULL) {
+		read_lock_bh(&idev->lock);
 		list_for_each_entry(ifa, &idev->addr_list, if_list) {
 			newdst = ifa->addr;
 			addr = true;
 			break;
 		}
+		read_unlock_bh(&idev->lock);
 	}
 	rcu_read_unlock();
 
diff --git a/net/netfilter/nfnetlink_cthelper.c b/net/netfilter/nfnetlink_cthelper.c
index de8782345c86..d45558178da5 100644
--- a/net/netfilter/nfnetlink_cthelper.c
+++ b/net/netfilter/nfnetlink_cthelper.c
@@ -32,6 +32,13 @@ MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
 MODULE_DESCRIPTION("nfnl_cthelper: User-space connection tracking helpers");
 
+struct nfnl_cthelper {
+	struct list_head list;
+	struct nf_conntrack_helper helper;
+};
+
+static LIST_HEAD(nfnl_cthelper_list);
+
 static int
 nfnl_userspace_cthelper(struct sk_buff *skb, unsigned int protoff,
 			struct nf_conn *ct, enum ip_conntrack_info ctinfo)
@@ -161,6 +168,7 @@ nfnl_cthelper_parse_expect_policy(struct nf_conntrack_helper *helper,
 	int i, ret;
 	struct nf_conntrack_expect_policy *expect_policy;
 	struct nlattr *tb[NFCTH_POLICY_SET_MAX+1];
+	unsigned int class_max;
 
 	ret = nla_parse_nested(tb, NFCTH_POLICY_SET_MAX, attr,
 			       nfnl_cthelper_expect_policy_set);
@@ -170,19 +178,18 @@ nfnl_cthelper_parse_expect_policy(struct nf_conntrack_helper *helper,
 	if (!tb[NFCTH_POLICY_SET_NUM])
 		return -EINVAL;
 
-	helper->expect_class_max =
-		ntohl(nla_get_be32(tb[NFCTH_POLICY_SET_NUM]));
-
-	if (helper->expect_class_max != 0 &&
-	    helper->expect_class_max > NF_CT_MAX_EXPECT_CLASSES)
+	class_max = ntohl(nla_get_be32(tb[NFCTH_POLICY_SET_NUM]));
+	if (class_max == 0)
+		return -EINVAL;
+	if (class_max > NF_CT_MAX_EXPECT_CLASSES)
 		return -EOVERFLOW;
 
 	expect_policy = kzalloc(sizeof(struct nf_conntrack_expect_policy) *
-				helper->expect_class_max, GFP_KERNEL);
+				class_max, GFP_KERNEL);
 	if (expect_policy == NULL)
 		return -ENOMEM;
 
-	for (i=0; i<helper->expect_class_max; i++) {
+	for (i = 0; i < class_max; i++) {
 		if (!tb[NFCTH_POLICY_SET+i])
 			goto err;
 
@@ -191,6 +198,8 @@ nfnl_cthelper_parse_expect_policy(struct nf_conntrack_helper *helper,
 		if (ret < 0)
 			goto err;
 	}
+
+	helper->expect_class_max = class_max - 1;
 	helper->expect_policy = expect_policy;
 	return 0;
 err:
@@ -203,18 +212,20 @@ nfnl_cthelper_create(const struct nlattr * const tb[],
 			 struct nf_conntrack_tuple *tuple)
 {
 	struct nf_conntrack_helper *helper;
+	struct nfnl_cthelper *nfcth;
 	int ret;
 
 	if (!tb[NFCTH_TUPLE] || !tb[NFCTH_POLICY] || !tb[NFCTH_PRIV_DATA_LEN])
 		return -EINVAL;
 
-	helper = kzalloc(sizeof(struct nf_conntrack_helper), GFP_KERNEL);
-	if (helper == NULL)
+	nfcth = kzalloc(sizeof(*nfcth), GFP_KERNEL);
+	if (nfcth == NULL)
 		return -ENOMEM;
+	helper = &nfcth->helper;
 
 	ret = nfnl_cthelper_parse_expect_policy(helper, tb[NFCTH_POLICY]);
 	if (ret < 0)
-		goto err;
+		goto err1;
 
 	strncpy(helper->name, nla_data(tb[NFCTH_NAME]), NF_CT_HELPER_NAME_LEN);
 	helper->data_len = ntohl(nla_get_be32(tb[NFCTH_PRIV_DATA_LEN]));
@@ -245,15 +256,101 @@ nfnl_cthelper_create(const struct nlattr * const tb[],
 
 	ret = nf_conntrack_helper_register(helper);
 	if (ret < 0)
-		goto err;
+		goto err2;
 
+	list_add_tail(&nfcth->list, &nfnl_cthelper_list);
 	return 0;
-err:
-	kfree(helper);
+err2:
+	kfree(helper->expect_policy);
+err1:
+	kfree(nfcth);
 	return ret;
 }
 
 static int
+nfnl_cthelper_update_policy_one(const struct nf_conntrack_expect_policy *policy,
+				struct nf_conntrack_expect_policy *new_policy,
+				const struct nlattr *attr)
+{
+	struct nlattr *tb[NFCTH_POLICY_MAX + 1];
+	int err;
+
+	err = nla_parse_nested(tb, NFCTH_POLICY_MAX, attr,
+			       nfnl_cthelper_expect_pol);
+	if (err < 0)
+		return err;
+
+	if (!tb[NFCTH_POLICY_NAME] ||
+	    !tb[NFCTH_POLICY_EXPECT_MAX] ||
+	    !tb[NFCTH_POLICY_EXPECT_TIMEOUT])
+		return -EINVAL;
+
+	if (nla_strcmp(tb[NFCTH_POLICY_NAME], policy->name))
+		return -EBUSY;
+
+	new_policy->max_expected =
+		ntohl(nla_get_be32(tb[NFCTH_POLICY_EXPECT_MAX]));
+	new_policy->timeout =
+		ntohl(nla_get_be32(tb[NFCTH_POLICY_EXPECT_TIMEOUT]));
+
+	return 0;
+}
+
+static int nfnl_cthelper_update_policy_all(struct nlattr *tb[],
+					   struct nf_conntrack_helper *helper)
+{
+	struct nf_conntrack_expect_policy new_policy[helper->expect_class_max + 1];
+	struct nf_conntrack_expect_policy *policy;
+	int i, err;
+
+	/* Check first that all policy attributes are well-formed, so we don't
+	 * leave things in inconsistent state on errors.
+	 */
+	for (i = 0; i < helper->expect_class_max + 1; i++) {
+
+		if (!tb[NFCTH_POLICY_SET + i])
+			return -EINVAL;
+
+		err = nfnl_cthelper_update_policy_one(&helper->expect_policy[i],
+						      &new_policy[i],
+						      tb[NFCTH_POLICY_SET + i]);
+		if (err < 0)
+			return err;
+	}
+	/* Now we can safely update them. */
+	for (i = 0; i < helper->expect_class_max + 1; i++) {
+		policy = (struct nf_conntrack_expect_policy *)
+				&helper->expect_policy[i];
+		policy->max_expected = new_policy->max_expected;
+		policy->timeout = new_policy->timeout;
+	}
+
+	return 0;
+}
+
+static int nfnl_cthelper_update_policy(struct nf_conntrack_helper *helper,
+				       const struct nlattr *attr)
+{
+	struct nlattr *tb[NFCTH_POLICY_SET_MAX + 1];
+	unsigned int class_max;
+	int err;
+
+	err = nla_parse_nested(tb, NFCTH_POLICY_SET_MAX, attr,
+			       nfnl_cthelper_expect_policy_set);
+	if (err < 0)
+		return err;
+
+	if (!tb[NFCTH_POLICY_SET_NUM])
+		return -EINVAL;
+
+	class_max = ntohl(nla_get_be32(tb[NFCTH_POLICY_SET_NUM]));
+	if (helper->expect_class_max + 1 != class_max)
+		return -EBUSY;
+
+	return nfnl_cthelper_update_policy_all(tb, helper);
+}
+
+static int
 nfnl_cthelper_update(const struct nlattr * const tb[],
 		     struct nf_conntrack_helper *helper)
 {
@@ -263,8 +360,7 @@ nfnl_cthelper_update(const struct nlattr * const tb[],
 		return -EBUSY;
 
 	if (tb[NFCTH_POLICY]) {
-		ret = nfnl_cthelper_parse_expect_policy(helper,
-							tb[NFCTH_POLICY]);
+		ret = nfnl_cthelper_update_policy(helper, tb[NFCTH_POLICY]);
 		if (ret < 0)
 			return ret;
 	}
@@ -293,7 +389,8 @@ static int nfnl_cthelper_new(struct net *net, struct sock *nfnl,
 	const char *helper_name;
 	struct nf_conntrack_helper *cur, *helper = NULL;
 	struct nf_conntrack_tuple tuple;
-	int ret = 0, i;
+	struct nfnl_cthelper *nlcth;
+	int ret = 0;
 
 	if (!tb[NFCTH_NAME] || !tb[NFCTH_TUPLE])
 		return -EINVAL;
@@ -304,31 +401,22 @@ static int nfnl_cthelper_new(struct net *net, struct sock *nfnl,
 	if (ret < 0)
 		return ret;
 
-	rcu_read_lock();
-	for (i = 0; i < nf_ct_helper_hsize && !helper; i++) {
-		hlist_for_each_entry_rcu(cur, &nf_ct_helper_hash[i], hnode) {
+	list_for_each_entry(nlcth, &nfnl_cthelper_list, list) {
+		cur = &nlcth->helper;
 
-			/* skip non-userspace conntrack helpers. */
-			if (!(cur->flags & NF_CT_HELPER_F_USERSPACE))
-				continue;
+		if (strncmp(cur->name, helper_name, NF_CT_HELPER_NAME_LEN))
+			continue;
 
-			if (strncmp(cur->name, helper_name,
-				    NF_CT_HELPER_NAME_LEN) != 0)
-				continue;
+		if ((tuple.src.l3num != cur->tuple.src.l3num ||
+		     tuple.dst.protonum != cur->tuple.dst.protonum))
+			continue;
 
-			if ((tuple.src.l3num != cur->tuple.src.l3num ||
-			     tuple.dst.protonum != cur->tuple.dst.protonum))
-				continue;
+		if (nlh->nlmsg_flags & NLM_F_EXCL)
+			return -EEXIST;
 
-			if (nlh->nlmsg_flags & NLM_F_EXCL) {
-				ret = -EEXIST;
-				goto err;
-			}
-			helper = cur;
-			break;
-		}
+		helper = cur;
+		break;
 	}
-	rcu_read_unlock();
 
 	if (helper == NULL)
 		ret = nfnl_cthelper_create(tb, &tuple);
@@ -336,9 +424,6 @@ static int nfnl_cthelper_new(struct net *net, struct sock *nfnl,
 		ret = nfnl_cthelper_update(tb, helper);
 
 	return ret;
-err:
-	rcu_read_unlock();
-	return ret;
 }
 
 static int
@@ -377,10 +462,10 @@ nfnl_cthelper_dump_policy(struct sk_buff *skb,
 		goto nla_put_failure;
 
 	if (nla_put_be32(skb, NFCTH_POLICY_SET_NUM,
-			 htonl(helper->expect_class_max)))
+			 htonl(helper->expect_class_max + 1)))
 		goto nla_put_failure;
 
-	for (i=0; i<helper->expect_class_max; i++) {
+	for (i = 0; i < helper->expect_class_max + 1; i++) {
 		nest_parms2 = nla_nest_start(skb,
 				(NFCTH_POLICY_SET+i) | NLA_F_NESTED);
 		if (nest_parms2 == NULL)
@@ -502,11 +587,12 @@ static int nfnl_cthelper_get(struct net *net, struct sock *nfnl,
 			     struct sk_buff *skb, const struct nlmsghdr *nlh,
 			     const struct nlattr * const tb[])
 {
-	int ret = -ENOENT, i;
+	int ret = -ENOENT;
 	struct nf_conntrack_helper *cur;
 	struct sk_buff *skb2;
 	char *helper_name = NULL;
 	struct nf_conntrack_tuple tuple;
+	struct nfnl_cthelper *nlcth;
 	bool tuple_set = false;
 
 	if (nlh->nlmsg_flags & NLM_F_DUMP) {
@@ -527,45 +613,39 @@ static int nfnl_cthelper_get(struct net *net, struct sock *nfnl,
 		tuple_set = true;
 	}
 
-	for (i = 0; i < nf_ct_helper_hsize; i++) {
-		hlist_for_each_entry_rcu(cur, &nf_ct_helper_hash[i], hnode) {
-
-			/* skip non-userspace conntrack helpers. */
-			if (!(cur->flags & NF_CT_HELPER_F_USERSPACE))
-				continue;
-
-			if (helper_name && strncmp(cur->name, helper_name,
-						NF_CT_HELPER_NAME_LEN) != 0) {
-				continue;
-			}
-			if (tuple_set &&
-			    (tuple.src.l3num != cur->tuple.src.l3num ||
-			     tuple.dst.protonum != cur->tuple.dst.protonum))
-				continue;
-
-			skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
-			if (skb2 == NULL) {
-				ret = -ENOMEM;
-				break;
-			}
-
-			ret = nfnl_cthelper_fill_info(skb2, NETLINK_CB(skb).portid,
-						nlh->nlmsg_seq,
-						NFNL_MSG_TYPE(nlh->nlmsg_type),
-						NFNL_MSG_CTHELPER_NEW, cur);
-			if (ret <= 0) {
-				kfree_skb(skb2);
-				break;
-			}
-
-			ret = netlink_unicast(nfnl, skb2, NETLINK_CB(skb).portid,
-					MSG_DONTWAIT);
-			if (ret > 0)
-				ret = 0;
-
-			/* this avoids a loop in nfnetlink. */
-			return ret == -EAGAIN ? -ENOBUFS : ret;
-		}
+	list_for_each_entry(nlcth, &nfnl_cthelper_list, list) {
+		cur = &nlcth->helper;
+		if (helper_name &&
+		    strncmp(cur->name, helper_name, NF_CT_HELPER_NAME_LEN))
+			continue;
+
+		if (tuple_set &&
+		    (tuple.src.l3num != cur->tuple.src.l3num ||
+		     tuple.dst.protonum != cur->tuple.dst.protonum))
+			continue;
+
+		skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+		if (skb2 == NULL) {
+			ret = -ENOMEM;
+			break;
+		}
+
+		ret = nfnl_cthelper_fill_info(skb2, NETLINK_CB(skb).portid,
+					      nlh->nlmsg_seq,
+					      NFNL_MSG_TYPE(nlh->nlmsg_type),
+					      NFNL_MSG_CTHELPER_NEW, cur);
+		if (ret <= 0) {
+			kfree_skb(skb2);
+			break;
+		}
+
+		ret = netlink_unicast(nfnl, skb2, NETLINK_CB(skb).portid,
+				      MSG_DONTWAIT);
+		if (ret > 0)
+			ret = 0;
+
+		/* this avoids a loop in nfnetlink. */
+		return ret == -EAGAIN ? -ENOBUFS : ret;
 	}
 	return ret;
 }
@@ -576,10 +656,10 @@ static int nfnl_cthelper_del(struct net *net, struct sock *nfnl,
 {
 	char *helper_name = NULL;
 	struct nf_conntrack_helper *cur;
-	struct hlist_node *tmp;
 	struct nf_conntrack_tuple tuple;
 	bool tuple_set = false, found = false;
-	int i, j = 0, ret;
+	struct nfnl_cthelper *nlcth, *n;
+	int j = 0, ret;
 
 	if (tb[NFCTH_NAME])
 		helper_name = nla_data(tb[NFCTH_NAME]);
@@ -592,28 +672,27 @@ static int nfnl_cthelper_del(struct net *net, struct sock *nfnl,
 		tuple_set = true;
 	}
 
-	for (i = 0; i < nf_ct_helper_hsize; i++) {
-		hlist_for_each_entry_safe(cur, tmp, &nf_ct_helper_hash[i],
-					  hnode) {
-			/* skip non-userspace conntrack helpers. */
-			if (!(cur->flags & NF_CT_HELPER_F_USERSPACE))
-				continue;
+	list_for_each_entry_safe(nlcth, n, &nfnl_cthelper_list, list) {
+		cur = &nlcth->helper;
+		j++;
 
-			j++;
+		if (helper_name &&
+		    strncmp(cur->name, helper_name, NF_CT_HELPER_NAME_LEN))
+			continue;
 
-			if (helper_name && strncmp(cur->name, helper_name,
-						NF_CT_HELPER_NAME_LEN) != 0) {
-				continue;
-			}
-			if (tuple_set &&
-			    (tuple.src.l3num != cur->tuple.src.l3num ||
-			     tuple.dst.protonum != cur->tuple.dst.protonum))
-				continue;
+		if (tuple_set &&
+		    (tuple.src.l3num != cur->tuple.src.l3num ||
+		     tuple.dst.protonum != cur->tuple.dst.protonum))
+			continue;
 
-			found = true;
-			nf_conntrack_helper_unregister(cur);
-		}
+		found = true;
+		nf_conntrack_helper_unregister(cur);
+		kfree(cur->expect_policy);
+
+		list_del(&nlcth->list);
+		kfree(nlcth);
 	}
+
 	/* Make sure we return success if we flush and there is no helpers */
 	return (found || j == 0) ? 0 : -ENOENT;
 }
@@ -662,20 +741,16 @@ err_out:
 static void __exit nfnl_cthelper_exit(void)
 {
 	struct nf_conntrack_helper *cur;
-	struct hlist_node *tmp;
-	int i;
+	struct nfnl_cthelper *nlcth, *n;
 
 	nfnetlink_subsys_unregister(&nfnl_cthelper_subsys);
 
-	for (i=0; i<nf_ct_helper_hsize; i++) {
-		hlist_for_each_entry_safe(cur, tmp, &nf_ct_helper_hash[i],
-					  hnode) {
-			/* skip non-userspace conntrack helpers. */
-			if (!(cur->flags & NF_CT_HELPER_F_USERSPACE))
-				continue;
+	list_for_each_entry_safe(nlcth, n, &nfnl_cthelper_list, list) {
+		cur = &nlcth->helper;
 
-			nf_conntrack_helper_unregister(cur);
-		}
+		nf_conntrack_helper_unregister(cur);
+		kfree(cur->expect_policy);
+		kfree(nlcth);
 	}
 }
 
681 756
diff --git a/net/netfilter/nfnetlink_cttimeout.c b/net/netfilter/nfnetlink_cttimeout.c
index 139e0867e56e..47d6656c9119 100644
--- a/net/netfilter/nfnetlink_cttimeout.c
+++ b/net/netfilter/nfnetlink_cttimeout.c
@@ -646,8 +646,8 @@ static void __exit cttimeout_exit(void)
 #ifdef CONFIG_NF_CONNTRACK_TIMEOUT
 	RCU_INIT_POINTER(nf_ct_timeout_find_get_hook, NULL);
 	RCU_INIT_POINTER(nf_ct_timeout_put_hook, NULL);
+	synchronize_rcu();
 #endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
-	rcu_barrier();
 }
 
 module_init(cttimeout_init);
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index 3ee0b8a000a4..933509ebf3d3 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -443,7 +443,7 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
 	skb = alloc_skb(size, GFP_ATOMIC);
 	if (!skb) {
 		skb_tx_error(entskb);
-		return NULL;
+		goto nlmsg_failure;
 	}
 
 	nlh = nlmsg_put(skb, 0, 0,
@@ -452,7 +452,7 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
 	if (!nlh) {
 		skb_tx_error(entskb);
 		kfree_skb(skb);
-		return NULL;
+		goto nlmsg_failure;
 	}
 	nfmsg = nlmsg_data(nlh);
 	nfmsg->nfgen_family = entry->state.pf;
@@ -598,12 +598,17 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
 	}
 
 	nlh->nlmsg_len = skb->len;
+	if (seclen)
+		security_release_secctx(secdata, seclen);
 	return skb;
 
 nla_put_failure:
 	skb_tx_error(entskb);
 	kfree_skb(skb);
 	net_err_ratelimited("nf_queue: error creating packet message\n");
+nlmsg_failure:
+	if (seclen)
+		security_release_secctx(secdata, seclen);
 	return NULL;
 }
 
608} 613}
609 614
diff --git a/net/netfilter/nft_hash.c b/net/netfilter/nft_hash.c
index eb2721af898d..c4dad1254ead 100644
--- a/net/netfilter/nft_hash.c
+++ b/net/netfilter/nft_hash.c
@@ -21,6 +21,7 @@ struct nft_hash {
 	enum nft_registers	sreg:8;
 	enum nft_registers	dreg:8;
 	u8			len;
+	bool			autogen_seed:1;
 	u32			modulus;
 	u32			seed;
 	u32			offset;
@@ -82,10 +83,12 @@ static int nft_hash_init(const struct nft_ctx *ctx,
 	if (priv->offset + priv->modulus - 1 < priv->offset)
 		return -EOVERFLOW;
 
-	if (tb[NFTA_HASH_SEED])
+	if (tb[NFTA_HASH_SEED]) {
 		priv->seed = ntohl(nla_get_be32(tb[NFTA_HASH_SEED]));
-	else
+	} else {
+		priv->autogen_seed = true;
 		get_random_bytes(&priv->seed, sizeof(priv->seed));
+	}
 
 	return nft_validate_register_load(priv->sreg, len) &&
 	       nft_validate_register_store(ctx, priv->dreg, NULL,
@@ -105,7 +108,8 @@ static int nft_hash_dump(struct sk_buff *skb,
 		goto nla_put_failure;
 	if (nla_put_be32(skb, NFTA_HASH_MODULUS, htonl(priv->modulus)))
 		goto nla_put_failure;
-	if (nla_put_be32(skb, NFTA_HASH_SEED, htonl(priv->seed)))
+	if (!priv->autogen_seed &&
+	    nla_put_be32(skb, NFTA_HASH_SEED, htonl(priv->seed)))
 		goto nla_put_failure;
 	if (priv->offset != 0)
 		if (nla_put_be32(skb, NFTA_HASH_OFFSET, htonl(priv->offset)))
diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c
index 27241a767f17..c64aca611ac5 100644
--- a/net/netfilter/xt_TCPMSS.c
+++ b/net/netfilter/xt_TCPMSS.c
@@ -104,7 +104,7 @@ tcpmss_mangle_packet(struct sk_buff *skb,
 	tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff);
 	tcp_hdrlen = tcph->doff * 4;
 
-	if (len < tcp_hdrlen)
+	if (len < tcp_hdrlen || tcp_hdrlen < sizeof(struct tcphdr))
 		return -1;
 
 	if (info->mss == XT_TCPMSS_CLAMP_PMTU) {
@@ -152,6 +152,10 @@ tcpmss_mangle_packet(struct sk_buff *skb,
 	if (len > tcp_hdrlen)
 		return 0;
 
+	/* tcph->doff has 4 bits, do not wrap it to 0 */
+	if (tcp_hdrlen >= 15 * 4)
+		return 0;
+
 	/*
 	 * MSS Option not found ?! add it..
 	 */
diff --git a/net/netfilter/xt_TPROXY.c b/net/netfilter/xt_TPROXY.c
index 80cb7babeb64..df7f1df00330 100644
--- a/net/netfilter/xt_TPROXY.c
+++ b/net/netfilter/xt_TPROXY.c
@@ -393,7 +393,8 @@ tproxy_laddr6(struct sk_buff *skb, const struct in6_addr *user_laddr,
 
 	rcu_read_lock();
 	indev = __in6_dev_get(skb->dev);
-	if (indev)
+	if (indev) {
+		read_lock_bh(&indev->lock);
 		list_for_each_entry(ifa, &indev->addr_list, if_list) {
 			if (ifa->flags & (IFA_F_TENTATIVE | IFA_F_DEPRECATED))
 				continue;
@@ -401,6 +402,8 @@ tproxy_laddr6(struct sk_buff *skb, const struct in6_addr *user_laddr,
 			laddr = &ifa->addr;
 			break;
 		}
+		read_unlock_bh(&indev->lock);
+	}
 	rcu_read_unlock();
 
 	return laddr ? laddr : daddr;
diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
index e0a87776a010..7b2c2fce408a 100644
--- a/net/openvswitch/conntrack.c
+++ b/net/openvswitch/conntrack.c
@@ -643,8 +643,8 @@ static bool skb_nfct_cached(struct net *net,
 		 */
 		if (nf_ct_is_confirmed(ct))
 			nf_ct_delete(ct, 0, 0);
-		else
-			nf_conntrack_put(&ct->ct_general);
+
+		nf_conntrack_put(&ct->ct_general);
 		nf_ct_set(skb, NULL, 0);
 		return false;
 	}
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index 9d4bb8eb63f2..3f76cb765e5b 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -527,7 +527,7 @@ static int key_extract(struct sk_buff *skb, struct sw_flow_key *key)
 
 	/* Link layer. */
 	clear_vlan(key);
-	if (key->mac_proto == MAC_PROTO_NONE) {
+	if (ovs_key_mac_proto(key) == MAC_PROTO_NONE) {
 		if (unlikely(eth_type_vlan(skb->protocol)))
 			return -EINVAL;
 
@@ -745,7 +745,13 @@ static int key_extract(struct sk_buff *skb, struct sw_flow_key *key)
 
 int ovs_flow_key_update(struct sk_buff *skb, struct sw_flow_key *key)
 {
-	return key_extract(skb, key);
+	int res;
+
+	res = key_extract(skb, key);
+	if (!res)
+		key->mac_proto &= ~SW_FLOW_KEY_INVALID;
+
+	return res;
 }
 
 static int key_extract_mac_proto(struct sk_buff *skb)
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index a0dbe7ca8f72..8489beff5c25 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -3665,6 +3665,8 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
 			return -EBUSY;
 		if (copy_from_user(&val, optval, sizeof(val)))
 			return -EFAULT;
+		if (val > INT_MAX)
+			return -EINVAL;
 		po->tp_reserve = val;
 		return 0;
 	}
@@ -4193,8 +4195,8 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
 		if (unlikely(!PAGE_ALIGNED(req->tp_block_size)))
 			goto out;
 		if (po->tp_version >= TPACKET_V3 &&
-		    (int)(req->tp_block_size -
-			  BLK_PLUS_PRIV(req_u->req3.tp_sizeof_priv)) <= 0)
+		    req->tp_block_size <=
+			  BLK_PLUS_PRIV((u64)req_u->req3.tp_sizeof_priv))
 			goto out;
 		if (unlikely(req->tp_frame_size < po->tp_hdrlen +
 					po->tp_reserve))
@@ -4205,6 +4207,8 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
 		rb->frames_per_block = req->tp_block_size / req->tp_frame_size;
 		if (unlikely(rb->frames_per_block == 0))
 			goto out;
+		if (unlikely(req->tp_block_size > UINT_MAX / req->tp_block_nr))
+			goto out;
 		if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
 					req->tp_frame_nr))
 			goto out;
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index b052b27a984e..1a2f9e964330 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -794,7 +794,7 @@ static void attach_default_qdiscs(struct net_device *dev)
 		}
 	}
 #ifdef CONFIG_NET_SCHED
-	if (dev->qdisc)
+	if (dev->qdisc != &noop_qdisc)
 		qdisc_hash_add(dev->qdisc);
 #endif
 }
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 0439a1a68367..a9708da28eb5 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -246,6 +246,9 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
 	if (!sctp_ulpq_init(&asoc->ulpq, asoc))
 		goto fail_init;
 
+	if (sctp_stream_new(asoc, gfp))
+		goto fail_init;
+
 	/* Assume that peer would support both address types unless we are
 	 * told otherwise.
 	 */
@@ -264,7 +267,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
 	/* AUTH related initializations */
 	INIT_LIST_HEAD(&asoc->endpoint_shared_keys);
 	if (sctp_auth_asoc_copy_shkeys(ep, asoc, gfp))
-		goto fail_init;
+		goto stream_free;
 
 	asoc->active_key_id = ep->active_key_id;
 	asoc->prsctp_enable = ep->prsctp_enable;
@@ -287,6 +290,8 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
 
 	return asoc;
 
+stream_free:
+	sctp_stream_free(asoc->stream);
 fail_init:
 	sock_put(asoc->base.sk);
 	sctp_endpoint_put(asoc->ep);
@@ -1407,7 +1412,7 @@ sctp_assoc_choose_alter_transport(struct sctp_association *asoc,
 /* Update the association's pmtu and frag_point by going through all the
  * transports. This routine is called when a transport's PMTU has changed.
  */
-void sctp_assoc_sync_pmtu(struct sock *sk, struct sctp_association *asoc)
+void sctp_assoc_sync_pmtu(struct sctp_association *asoc)
 {
 	struct sctp_transport *t;
 	__u32 pmtu = 0;
@@ -1419,8 +1424,8 @@ void sctp_assoc_sync_pmtu(struct sock *sk, struct sctp_association *asoc)
 	list_for_each_entry(t, &asoc->peer.transport_addr_list,
 			    transports) {
 		if (t->pmtu_pending && t->dst) {
-			sctp_transport_update_pmtu(sk, t,
-						   SCTP_TRUNC4(dst_mtu(t->dst)));
+			sctp_transport_update_pmtu(
+					t, SCTP_TRUNC4(dst_mtu(t->dst)));
 			t->pmtu_pending = 0;
 		}
 		if (!pmtu || (t->pathmtu < pmtu))
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 2a28ab20487f..0e06a278d2a9 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -401,10 +401,10 @@ void sctp_icmp_frag_needed(struct sock *sk, struct sctp_association *asoc,
 
 	if (t->param_flags & SPP_PMTUD_ENABLE) {
 		/* Update transports view of the MTU */
-		sctp_transport_update_pmtu(sk, t, pmtu);
+		sctp_transport_update_pmtu(t, pmtu);
 
 		/* Update association pmtu. */
-		sctp_assoc_sync_pmtu(sk, asoc);
+		sctp_assoc_sync_pmtu(asoc);
 	}
 
 	/* Retransmit with the new pmtu setting.
diff --git a/net/sctp/output.c b/net/sctp/output.c
index 1224421036b3..1409a875ad8e 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -86,43 +86,53 @@ void sctp_packet_config(struct sctp_packet *packet, __u32 vtag,
 {
 	struct sctp_transport *tp = packet->transport;
 	struct sctp_association *asoc = tp->asoc;
+	struct sock *sk;
 
 	pr_debug("%s: packet:%p vtag:0x%x\n", __func__, packet, vtag);
-
 	packet->vtag = vtag;
 
-	if (asoc && tp->dst) {
-		struct sock *sk = asoc->base.sk;
-
-		rcu_read_lock();
-		if (__sk_dst_get(sk) != tp->dst) {
-			dst_hold(tp->dst);
-			sk_setup_caps(sk, tp->dst);
-		}
-
-		if (sk_can_gso(sk)) {
-			struct net_device *dev = tp->dst->dev;
+	/* do the following jobs only once for a flush schedule */
+	if (!sctp_packet_empty(packet))
+		return;
 
-			packet->max_size = dev->gso_max_size;
-		} else {
-			packet->max_size = asoc->pathmtu;
-		}
-		rcu_read_unlock();
+	/* set packet max_size with pathmtu */
+	packet->max_size = tp->pathmtu;
+	if (!asoc)
+		return;
 
-	} else {
-		packet->max_size = tp->pathmtu;
+	/* update dst or transport pathmtu if in need */
+	sk = asoc->base.sk;
+	if (!sctp_transport_dst_check(tp)) {
+		sctp_transport_route(tp, NULL, sctp_sk(sk));
+		if (asoc->param_flags & SPP_PMTUD_ENABLE)
+			sctp_assoc_sync_pmtu(asoc);
+	} else if (!sctp_transport_pmtu_check(tp)) {
+		if (asoc->param_flags & SPP_PMTUD_ENABLE)
+			sctp_assoc_sync_pmtu(asoc);
 	}
 
-	if (ecn_capable && sctp_packet_empty(packet)) {
-		struct sctp_chunk *chunk;
+	/* If there a is a prepend chunk stick it on the list before
+	 * any other chunks get appended.
+	 */
+	if (ecn_capable) {
+		struct sctp_chunk *chunk = sctp_get_ecne_prepend(asoc);
 
-		/* If there a is a prepend chunk stick it on the list before
-		 * any other chunks get appended.
-		 */
-		chunk = sctp_get_ecne_prepend(asoc);
 		if (chunk)
 			sctp_packet_append_chunk(packet, chunk);
 	}
+
+	if (!tp->dst)
+		return;
+
+	/* set packet max_size with gso_max_size if gso is enabled*/
+	rcu_read_lock();
+	if (__sk_dst_get(sk) != tp->dst) {
+		dst_hold(tp->dst);
+		sk_setup_caps(sk, tp->dst);
+	}
+	packet->max_size = sk_can_gso(sk) ? tp->dst->dev->gso_max_size
+					  : asoc->pathmtu;
+	rcu_read_unlock();
 }
 
 /* Initialize the packet structure. */
@@ -582,12 +592,7 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
 	sh->vtag = htonl(packet->vtag);
 	sh->checksum = 0;
 
-	/* update dst if in need */
-	if (!sctp_transport_dst_check(tp)) {
-		sctp_transport_route(tp, NULL, sctp_sk(sk));
-		if (asoc && asoc->param_flags & SPP_PMTUD_ENABLE)
-			sctp_assoc_sync_pmtu(sk, asoc);
-	}
+	/* drop packet if no dst */
 	dst = dst_clone(tp->dst);
 	if (!dst) {
 		IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
@@ -704,7 +709,7 @@ static sctp_xmit_t sctp_packet_can_append_data(struct sctp_packet *packet,
 	 */
 
 	if ((sctp_sk(asoc->base.sk)->nodelay || inflight == 0) &&
-	    !chunk->msg->force_delay)
+	    !asoc->force_delay)
 		/* Nothing unacked */
 		return SCTP_XMIT_OK;
 
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index 025ccff67072..8081476ed313 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -1026,8 +1026,7 @@ static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
 			/* RFC 2960 6.5 Every DATA chunk MUST carry a valid
 			 * stream identifier.
 			 */
-			if (chunk->sinfo.sinfo_stream >=
-			    asoc->c.sinit_num_ostreams) {
+			if (chunk->sinfo.sinfo_stream >= asoc->stream->outcnt) {
 
 				/* Mark as failed send. */
 				sctp_chunk_fail(chunk, SCTP_ERROR_INV_STRM);
diff --git a/net/sctp/proc.c b/net/sctp/proc.c
index 206377fe91ec..a0b29d43627f 100644
--- a/net/sctp/proc.c
+++ b/net/sctp/proc.c
@@ -361,8 +361,8 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
 		sctp_seq_dump_remote_addrs(seq, assoc);
 		seq_printf(seq, "\t%8lu %5d %5d %4d %4d %4d %8d "
 			   "%8d %8d %8d %8d",
-			assoc->hbinterval, assoc->c.sinit_max_instreams,
-			assoc->c.sinit_num_ostreams, assoc->max_retrans,
+			assoc->hbinterval, assoc->stream->incnt,
+			assoc->stream->outcnt, assoc->max_retrans,
 			assoc->init_retries, assoc->shutdown_retries,
 			assoc->rtx_data_chunks,
 			atomic_read(&sk->sk_wmem_alloc),
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 969a30c7bb54..118faff6a332 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -2460,15 +2460,10 @@ int sctp_process_init(struct sctp_association *asoc, struct sctp_chunk *chunk,
 	 * association.
 	 */
 	if (!asoc->temp) {
-		int error;
-
-		asoc->stream = sctp_stream_new(asoc->c.sinit_max_instreams,
-					       asoc->c.sinit_num_ostreams, gfp);
-		if (!asoc->stream)
+		if (sctp_stream_init(asoc, gfp))
 			goto clean_up;
 
-		error = sctp_assoc_set_id(asoc, gfp);
-		if (error)
+		if (sctp_assoc_set_id(asoc, gfp))
 			goto clean_up;
 	}
 
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index e03bb1aab4d0..24c6ccce7539 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -3946,7 +3946,7 @@ sctp_disposition_t sctp_sf_eat_fwd_tsn(struct net *net,
 
 	/* Silently discard the chunk if stream-id is not valid */
 	sctp_walk_fwdtsn(skip, chunk) {
-		if (ntohs(skip->stream) >= asoc->c.sinit_max_instreams)
+		if (ntohs(skip->stream) >= asoc->stream->incnt)
 			goto discard_noforce;
 	}
 
@@ -4017,7 +4017,7 @@ sctp_disposition_t sctp_sf_eat_fwd_tsn_fast(
 
 	/* Silently discard the chunk if stream-id is not valid */
 	sctp_walk_fwdtsn(skip, chunk) {
-		if (ntohs(skip->stream) >= asoc->c.sinit_max_instreams)
+		if (ntohs(skip->stream) >= asoc->stream->incnt)
 			goto gen_shutdown;
 	}
 
@@ -6353,7 +6353,7 @@ static int sctp_eat_data(const struct sctp_association *asoc,
 	 * and discard the DATA chunk.
 	 */
 	sid = ntohs(data_hdr->stream);
-	if (sid >= asoc->c.sinit_max_instreams) {
+	if (sid >= asoc->stream->incnt) {
 		/* Mark tsn as received even though we drop it */
 		sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_TSN, SCTP_U32(tsn));
 
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 0f378ea2ae38..d9d4c92e06b3 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -1907,7 +1907,7 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len)
 	}
 
 	if (asoc->pmtu_pending)
-		sctp_assoc_pending_pmtu(sk, asoc);
+		sctp_assoc_pending_pmtu(asoc);
 
 	/* If fragmentation is disabled and the message length exceeds the
 	 * association fragmentation point, return EMSGSIZE. The I-D
@@ -1920,7 +1920,7 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len)
 	}
 
 	/* Check for invalid stream. */
-	if (sinfo->sinfo_stream >= asoc->c.sinit_num_ostreams) {
+	if (sinfo->sinfo_stream >= asoc->stream->outcnt) {
 		err = -EINVAL;
 		goto out_free;
 	}
@@ -1965,7 +1965,7 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len)
 		err = PTR_ERR(datamsg);
 		goto out_free;
 	}
-	datamsg->force_delay = !!(msg->msg_flags & MSG_MORE);
+	asoc->force_delay = !!(msg->msg_flags & MSG_MORE);
 
 	/* Now send the (possibly) fragmented message. */
 	list_for_each_entry(chunk, &datamsg->chunks, frag_list) {
@@ -2435,7 +2435,7 @@ static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params,
 	if ((params->spp_flags & SPP_PMTUD_DISABLE) && params->spp_pathmtu) {
 		if (trans) {
 			trans->pathmtu = params->spp_pathmtu;
-			sctp_assoc_sync_pmtu(sctp_opt2sk(sp), asoc);
+			sctp_assoc_sync_pmtu(asoc);
 		} else if (asoc) {
 			asoc->pathmtu = params->spp_pathmtu;
 		} else {
@@ -2451,7 +2451,7 @@ static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params,
 			(trans->param_flags & ~SPP_PMTUD) | pmtud_change;
 		if (update) {
 			sctp_transport_pmtu(trans, sctp_opt2sk(sp));
-			sctp_assoc_sync_pmtu(sctp_opt2sk(sp), asoc);
+			sctp_assoc_sync_pmtu(asoc);
 		}
 	} else if (asoc) {
 		asoc->param_flags =
@@ -4461,8 +4461,8 @@ int sctp_get_sctp_info(struct sock *sk, struct sctp_association *asoc,
 	info->sctpi_rwnd = asoc->a_rwnd;
 	info->sctpi_unackdata = asoc->unack_data;
 	info->sctpi_penddata = sctp_tsnmap_pending(&asoc->peer.tsn_map);
-	info->sctpi_instrms = asoc->c.sinit_max_instreams;
-	info->sctpi_outstrms = asoc->c.sinit_num_ostreams;
+	info->sctpi_instrms = asoc->stream->incnt;
+	info->sctpi_outstrms = asoc->stream->outcnt;
 	list_for_each(pos, &asoc->base.inqueue.in_chunk_list)
 		info->sctpi_inqueue++;
 	list_for_each(pos, &asoc->outqueue.out_chunk_list)
@@ -4691,8 +4691,8 @@ static int sctp_getsockopt_sctp_status(struct sock *sk, int len,
 	status.sstat_unackdata = asoc->unack_data;
 
 	status.sstat_penddata = sctp_tsnmap_pending(&asoc->peer.tsn_map);
-	status.sstat_instrms = asoc->c.sinit_max_instreams;
-	status.sstat_outstrms = asoc->c.sinit_num_ostreams;
+	status.sstat_instrms = asoc->stream->incnt;
+	status.sstat_outstrms = asoc->stream->outcnt;
 	status.sstat_fragmentation_point = asoc->frag_point;
 	status.sstat_primary.spinfo_assoc_id = sctp_assoc2id(transport->asoc);
 	memcpy(&status.sstat_primary.spinfo_address, &transport->ipaddr,
@@ -7034,6 +7034,9 @@ int sctp_inet_listen(struct socket *sock, int backlog)
 	if (sock->state != SS_UNCONNECTED)
 		goto out;
 
+	if (!sctp_sstate(sk, LISTENING) && !sctp_sstate(sk, CLOSED))
+		goto out;
+
 	/* If backlog is zero, disable listening. */
 	if (!backlog) {
 		if (sctp_sstate(sk, CLOSED))
diff --git a/net/sctp/stream.c b/net/sctp/stream.c
index 1c6cc04fa3a4..bbed997e1c5f 100644
--- a/net/sctp/stream.c
+++ b/net/sctp/stream.c
@@ -35,33 +35,60 @@
35#include <net/sctp/sctp.h> 35#include <net/sctp/sctp.h>
36#include <net/sctp/sm.h> 36#include <net/sctp/sm.h>
37 37
38struct sctp_stream *sctp_stream_new(__u16 incnt, __u16 outcnt, gfp_t gfp) 38int sctp_stream_new(struct sctp_association *asoc, gfp_t gfp)
39{ 39{
40 struct sctp_stream *stream; 40 struct sctp_stream *stream;
41 int i; 41 int i;
42 42
43 stream = kzalloc(sizeof(*stream), gfp); 43 stream = kzalloc(sizeof(*stream), gfp);
44 if (!stream) 44 if (!stream)
45 return NULL; 45 return -ENOMEM;
46 46
47 stream->outcnt = outcnt; 47 stream->outcnt = asoc->c.sinit_num_ostreams;
48 stream->out = kcalloc(stream->outcnt, sizeof(*stream->out), gfp); 48 stream->out = kcalloc(stream->outcnt, sizeof(*stream->out), gfp);
49 if (!stream->out) { 49 if (!stream->out) {
50 kfree(stream); 50 kfree(stream);
51 return NULL; 51 return -ENOMEM;
52 } 52 }
53 for (i = 0; i < stream->outcnt; i++) 53 for (i = 0; i < stream->outcnt; i++)
54 stream->out[i].state = SCTP_STREAM_OPEN; 54 stream->out[i].state = SCTP_STREAM_OPEN;
55 55
56 stream->incnt = incnt; 56 asoc->stream = stream;
57
58 return 0;
59}
60
61int sctp_stream_init(struct sctp_association *asoc, gfp_t gfp)
62{
63 struct sctp_stream *stream = asoc->stream;
64 int i;
65
66 /* Initial stream->out size may be very big, so free it and alloc
67 * a new one with new outcnt to save memory.
68 */
69 kfree(stream->out);
70 stream->outcnt = asoc->c.sinit_num_ostreams;
71 stream->out = kcalloc(stream->outcnt, sizeof(*stream->out), gfp);
72 if (!stream->out)
73 goto nomem;
74
75 for (i = 0; i < stream->outcnt; i++)
76 stream->out[i].state = SCTP_STREAM_OPEN;
77
78 stream->incnt = asoc->c.sinit_max_instreams;
57 stream->in = kcalloc(stream->incnt, sizeof(*stream->in), gfp); 79 stream->in = kcalloc(stream->incnt, sizeof(*stream->in), gfp);
58 if (!stream->in) { 80 if (!stream->in) {
59 kfree(stream->out); 81 kfree(stream->out);
60 kfree(stream); 82 goto nomem;
61 return NULL;
62 } 83 }
63 84
64 return stream; 85 return 0;
86
87nomem:
88 asoc->stream = NULL;
89 kfree(stream);
90
91 return -ENOMEM;
65} 92}
66 93
67void sctp_stream_free(struct sctp_stream *stream) 94void sctp_stream_free(struct sctp_stream *stream)
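
The stream.c rework above splits setup into two phases: sctp_stream_new() allocates the structure and a possibly oversized out[] array from the locally requested sinit values, while sctp_stream_init() later shrinks out[] and allocates in[] once the peer's counts are negotiated, cleaning up after itself on failure. A minimal sketch of the resulting caller pattern; example_assoc_setup() is hypothetical, not a function from the patch:

	int example_assoc_setup(struct sctp_association *asoc, gfp_t gfp)
	{
		int err;

		err = sctp_stream_new(asoc, gfp);  /* asoc->stream allocated */
		if (err)
			return err;

		/* ... INIT/INIT_ACK exchange updates asoc->c.sinit_* ... */

		err = sctp_stream_init(asoc, gfp); /* out[] resized, in[] allocated */
		if (err)
			return err;	/* stream already freed, asoc->stream NULL */

		return 0;
	}
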
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index 3379668af368..721eeebfcd8a 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -251,14 +251,13 @@ void sctp_transport_pmtu(struct sctp_transport *transport, struct sock *sk)
251 transport->pathmtu = SCTP_DEFAULT_MAXSEGMENT; 251 transport->pathmtu = SCTP_DEFAULT_MAXSEGMENT;
252} 252}
253 253
254void sctp_transport_update_pmtu(struct sock *sk, struct sctp_transport *t, u32 pmtu) 254void sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu)
255{ 255{
256 struct dst_entry *dst; 256 struct dst_entry *dst = sctp_transport_dst_check(t);
257 257
258 if (unlikely(pmtu < SCTP_DEFAULT_MINSEGMENT)) { 258 if (unlikely(pmtu < SCTP_DEFAULT_MINSEGMENT)) {
259 pr_warn("%s: Reported pmtu %d too low, using default minimum of %d\n", 259 pr_warn("%s: Reported pmtu %d too low, using default minimum of %d\n",
260 __func__, pmtu, 260 __func__, pmtu, SCTP_DEFAULT_MINSEGMENT);
261 SCTP_DEFAULT_MINSEGMENT);
262 /* Use default minimum segment size and disable 261 /* Use default minimum segment size and disable
263 * pmtu discovery on this transport. 262 * pmtu discovery on this transport.
264 */ 263 */
@@ -267,17 +266,13 @@ void sctp_transport_update_pmtu(struct sock *sk, struct sctp_transport *t, u32 p
267 t->pathmtu = pmtu; 266 t->pathmtu = pmtu;
268 } 267 }
269 268
270 dst = sctp_transport_dst_check(t);
271 if (!dst)
272 t->af_specific->get_dst(t, &t->saddr, &t->fl, sk);
273
274 if (dst) { 269 if (dst) {
275 dst->ops->update_pmtu(dst, sk, NULL, pmtu); 270 dst->ops->update_pmtu(dst, t->asoc->base.sk, NULL, pmtu);
276
277 dst = sctp_transport_dst_check(t); 271 dst = sctp_transport_dst_check(t);
278 if (!dst)
279 t->af_specific->get_dst(t, &t->saddr, &t->fl, sk);
280 } 272 }
273
274 if (!dst)
275 t->af_specific->get_dst(t, &t->saddr, &t->fl, t->asoc->base.sk);
281} 276}
282 277
283/* Caches the dst entry and source address for a transport's destination 278/* Caches the dst entry and source address for a transport's destination
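
Condensed control flow of the reworked sctp_transport_update_pmtu(), as a sketch rather than the verbatim function (the param_flags handling in the elided lines is omitted and the too-low-pmtu branch is simplified to a clamp): one dst check up front, an update only while the cached route is still valid, and a single fallback lookup at the end, always against the association's own socket now that the sk argument is gone.

	void update_pmtu_sketch(struct sctp_transport *t, u32 pmtu)
	{
		struct dst_entry *dst = sctp_transport_dst_check(t);
		struct sock *sk = t->asoc->base.sk;

		if (pmtu < SCTP_DEFAULT_MINSEGMENT)
			pmtu = SCTP_DEFAULT_MINSEGMENT;	/* clamp, as warned above */
		t->pathmtu = pmtu;

		if (dst) {
			dst->ops->update_pmtu(dst, sk, NULL, pmtu);
			dst = sctp_transport_dst_check(t); /* update may drop it */
		}
		if (!dst)	/* single fallback route lookup */
			t->af_specific->get_dst(t, &t->saddr, &t->fl, sk);
	}
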
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 8931e33b6541..2b720fa35c4f 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -1635,6 +1635,7 @@ static struct svc_xprt *svc_bc_create_socket(struct svc_serv *serv,
1635 1635
1636 xprt = &svsk->sk_xprt; 1636 xprt = &svsk->sk_xprt;
1637 svc_xprt_init(net, &svc_tcp_bc_class, xprt, serv); 1637 svc_xprt_init(net, &svc_tcp_bc_class, xprt, serv);
1638 set_bit(XPT_CONG_CTRL, &svsk->sk_xprt.xpt_flags);
1638 1639
1639 serv->sv_bc_xprt = xprt; 1640 serv->sv_bc_xprt = xprt;
1640 1641
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index c13a5c35ce14..fc8f14c7bfec 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -127,6 +127,7 @@ static struct svc_xprt *svc_rdma_bc_create(struct svc_serv *serv,
127 xprt = &cma_xprt->sc_xprt; 127 xprt = &cma_xprt->sc_xprt;
128 128
129 svc_xprt_init(net, &svc_rdma_bc_class, xprt, serv); 129 svc_xprt_init(net, &svc_rdma_bc_class, xprt, serv);
130 set_bit(XPT_CONG_CTRL, &xprt->xpt_flags);
130 serv->sv_bc_xprt = xprt; 131 serv->sv_bc_xprt = xprt;
131 132
132 dprintk("svcrdma: %s(%p)\n", __func__, xprt); 133 dprintk("svcrdma: %s(%p)\n", __func__, xprt);
diff --git a/net/wireless/sysfs.c b/net/wireless/sysfs.c
index 16b6b5988be9..570a2b67ca10 100644
--- a/net/wireless/sysfs.c
+++ b/net/wireless/sysfs.c
@@ -132,12 +132,10 @@ static int wiphy_resume(struct device *dev)
132 /* Age scan results with time spent in suspend */ 132 /* Age scan results with time spent in suspend */
133 cfg80211_bss_age(rdev, get_seconds() - rdev->suspend_at); 133 cfg80211_bss_age(rdev, get_seconds() - rdev->suspend_at);
134 134
135 if (rdev->ops->resume) { 135 rtnl_lock();
136 rtnl_lock(); 136 if (rdev->wiphy.registered && rdev->ops->resume)
137 if (rdev->wiphy.registered) 137 ret = rdev_resume(rdev);
138 ret = rdev_resume(rdev); 138 rtnl_unlock();
139 rtnl_unlock();
140 }
141 139
142 return ret; 140 return ret;
143} 141}
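
The wiphy_resume() reshuffle is subtle: rdev->wiphy.registered must be read under RTNL, so the lock is now taken unconditionally and both conditions move inside the critical section. The resulting shape, as a sketch:

	static int wiphy_resume_sketch(struct cfg80211_registered_device *rdev)
	{
		int ret = 0;

		rtnl_lock();
		if (rdev->wiphy.registered && rdev->ops->resume)
			ret = rdev_resume(rdev); /* both checks under the lock */
		rtnl_unlock();

		return ret;
	}
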
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 9705c279494b..40a8aa39220d 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -412,7 +412,14 @@ static inline int xfrm_replay_verify_len(struct xfrm_replay_state_esn *replay_es
412 up = nla_data(rp); 412 up = nla_data(rp);
413 ulen = xfrm_replay_state_esn_len(up); 413 ulen = xfrm_replay_state_esn_len(up);
414 414
415 if (nla_len(rp) < ulen || xfrm_replay_state_esn_len(replay_esn) != ulen) 415 /* Check the overall length and the internal bitmap length to avoid
416 * potential overflow. */
417 if (nla_len(rp) < ulen ||
418 xfrm_replay_state_esn_len(replay_esn) != ulen ||
419 replay_esn->bmp_len != up->bmp_len)
420 return -EINVAL;
421
422 if (up->replay_window > up->bmp_len * sizeof(__u32) * 8)
416 return -EINVAL; 423 return -EINVAL;
417 424
418 return 0; 425 return 0;
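
The xfrm hunk ties four quantities together. Restated as a stand-alone predicate (a sketch; esn_len_ok() is not a kernel function, and the attribute plumbing is simplified):

	static int esn_len_ok(struct xfrm_replay_state_esn *up,
			      struct xfrm_replay_state_esn *replay_esn,
			      int attrlen)
	{
		unsigned int ulen = xfrm_replay_state_esn_len(up);

		if (attrlen < ulen)	/* truncated attribute */
			return 0;
		if (xfrm_replay_state_esn_len(replay_esn) != ulen)
			return 0;	/* state and attribute disagree */
		if (replay_esn->bmp_len != up->bmp_len)
			return 0;	/* bitmap resize, the overflow vector */
		if (up->replay_window > up->bmp_len * sizeof(__u32) * 8)
			return 0;	/* window wider than the bitmap */
		return 1;
	}

For example, bmp_len = 4 gives a 128-bit bitmap, so a request carrying replay_window = 256 is now rejected with -EINVAL.
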
diff --git a/samples/statx/test-statx.c b/samples/statx/test-statx.c
index 8571d766331d..d4d77b09412c 100644
--- a/samples/statx/test-statx.c
+++ b/samples/statx/test-statx.c
@@ -141,8 +141,8 @@ static void dump_statx(struct statx *stx)
141 if (stx->stx_mask & STATX_BTIME) 141 if (stx->stx_mask & STATX_BTIME)
142 print_time(" Birth: ", &stx->stx_btime); 142 print_time(" Birth: ", &stx->stx_btime);
143 143
144 if (stx->stx_attributes) { 144 if (stx->stx_attributes_mask) {
145 unsigned char bits; 145 unsigned char bits, mbits;
146 int loop, byte; 146 int loop, byte;
147 147
148 static char attr_representation[64 + 1] = 148 static char attr_representation[64 + 1] =
@@ -160,14 +160,18 @@ static void dump_statx(struct statx *stx)
160 printf("Attributes: %016llx (", stx->stx_attributes); 160 printf("Attributes: %016llx (", stx->stx_attributes);
161 for (byte = 64 - 8; byte >= 0; byte -= 8) { 161 for (byte = 64 - 8; byte >= 0; byte -= 8) {
162 bits = stx->stx_attributes >> byte; 162 bits = stx->stx_attributes >> byte;
163 mbits = stx->stx_attributes_mask >> byte;
163 for (loop = 7; loop >= 0; loop--) { 164 for (loop = 7; loop >= 0; loop--) {
164 int bit = byte + loop; 165 int bit = byte + loop;
165 166
166 if (bits & 0x80) 167 if (!(mbits & 0x80))
168 putchar('.'); /* Not supported */
169 else if (bits & 0x80)
167 putchar(attr_representation[63 - bit]); 170 putchar(attr_representation[63 - bit]);
168 else 171 else
169 putchar('-'); 172 putchar('-'); /* Not set */
170 bits <<= 1; 173 bits <<= 1;
174 mbits <<= 1;
171 } 175 }
172 if (byte) 176 if (byte)
173 putchar(' '); 177 putchar(' ');
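
With the mask consulted, each statx attribute bit now decodes three ways instead of two: unsupported, set, or supported-but-clear. The same logic as a small helper; attr_char() is hypothetical, attr_representation is the table from the surrounding code:

	static char attr_char(unsigned long long attrs, unsigned long long mask,
			      int bit, const char *rep)
	{
		if (!(mask & (1ULL << bit)))
			return '.';		/* not supported here */
		return (attrs & (1ULL << bit)) ?
			rep[63 - bit] : '-';	/* set vs. not set */
	}
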
diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include
index d6ca649cb0e9..afe3fd3af1e4 100644
--- a/scripts/Kbuild.include
+++ b/scripts/Kbuild.include
@@ -148,6 +148,10 @@ cc-fullversion = $(shell $(CONFIG_SHELL) \
148# Usage: EXTRA_CFLAGS += $(call cc-ifversion, -lt, 0402, -O1) 148# Usage: EXTRA_CFLAGS += $(call cc-ifversion, -lt, 0402, -O1)
149cc-ifversion = $(shell [ $(cc-version) $(1) $(2) ] && echo $(3) || echo $(4)) 149cc-ifversion = $(shell [ $(cc-version) $(1) $(2) ] && echo $(3) || echo $(4))
150 150
151# cc-if-fullversion
152# Usage: EXTRA_CFLAGS += $(call cc-if-fullversion, -lt, 040502, -O1)
153cc-if-fullversion = $(shell [ $(cc-fullversion) $(1) $(2) ] && echo $(3) || echo $(4))
154
151# cc-ldoption 155# cc-ldoption
152# Usage: ldflags += $(call cc-ldoption, -Wl$(comma)--hash-style=both) 156# Usage: ldflags += $(call cc-ldoption, -Wl$(comma)--hash-style=both)
153cc-ldoption = $(call try-run,\ 157cc-ldoption = $(call try-run,\
diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib
index 0a07f9014944..7234e61e7ce3 100644
--- a/scripts/Makefile.lib
+++ b/scripts/Makefile.lib
@@ -155,7 +155,7 @@ else
155# $(call addtree,-I$(obj)) locates .h files in srctree, from generated .c files 155# $(call addtree,-I$(obj)) locates .h files in srctree, from generated .c files
156# and locates generated .h files 156# and locates generated .h files
157# FIXME: Replace both with specific CFLAGS* statements in the makefiles 157# FIXME: Replace both with specific CFLAGS* statements in the makefiles
158__c_flags = $(if $(obj),-I$(srctree)/$(src) -I$(obj)) \ 158__c_flags = $(if $(obj),$(call addtree,-I$(src)) -I$(obj)) \
159 $(call flags,_c_flags) 159 $(call flags,_c_flags)
160__a_flags = $(call flags,_a_flags) 160__a_flags = $(call flags,_a_flags)
161__cpp_flags = $(call flags,_cpp_flags) 161__cpp_flags = $(call flags,_cpp_flags)
diff --git a/scripts/kconfig/gconf.c b/scripts/kconfig/gconf.c
index 26d208b435a0..cfddddb9c9d7 100644
--- a/scripts/kconfig/gconf.c
+++ b/scripts/kconfig/gconf.c
@@ -914,7 +914,7 @@ on_treeview2_button_press_event(GtkWidget * widget,
914 current = menu; 914 current = menu;
915 display_tree_part(); 915 display_tree_part();
916 gtk_widget_set_sensitive(back_btn, TRUE); 916 gtk_widget_set_sensitive(back_btn, TRUE);
917 } else if ((col == COL_OPTION)) { 917 } else if (col == COL_OPTION) {
918 toggle_sym_value(menu); 918 toggle_sym_value(menu);
919 gtk_tree_view_expand_row(view, path, TRUE); 919 gtk_tree_view_expand_row(view, path, TRUE);
920 } 920 }
diff --git a/sound/core/seq/seq_fifo.c b/sound/core/seq/seq_fifo.c
index 33980d1c8037..01c4cfe30c9f 100644
--- a/sound/core/seq/seq_fifo.c
+++ b/sound/core/seq/seq_fifo.c
@@ -267,6 +267,10 @@ int snd_seq_fifo_resize(struct snd_seq_fifo *f, int poolsize)
267 /* NOTE: overflow flag is not cleared */ 267 /* NOTE: overflow flag is not cleared */
268 spin_unlock_irqrestore(&f->lock, flags); 268 spin_unlock_irqrestore(&f->lock, flags);
269 269
270 /* close the old pool and wait until all users are gone */
271 snd_seq_pool_mark_closing(oldpool);
272 snd_use_lock_sync(&f->use_lock);
273
270 /* release cells in old pool */ 274 /* release cells in old pool */
271 for (cell = oldhead; cell; cell = next) { 275 for (cell = oldhead; cell; cell = next) {
272 next = cell->next; 276 next = cell->next;
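
The ordering is the whole of the seq_fifo fix: mark the old pool closing so no new reference can be taken, wait out in-flight users on the use lock, and only then free the cells. The pattern isolated into a hypothetical helper (the calls themselves are the real symbols used by the patched function):

	static void retire_old_pool(struct snd_seq_fifo *f,
				    struct snd_seq_pool *oldpool,
				    struct snd_seq_event_cell *oldhead)
	{
		struct snd_seq_event_cell *cell, *next;

		snd_seq_pool_mark_closing(oldpool); /* no new users attach   */
		snd_use_lock_sync(&f->use_lock);    /* drain current readers */

		for (cell = oldhead; cell; cell = next) { /* now race-free   */
			next = cell->next;
			snd_seq_cell_free(cell);
		}
		snd_seq_pool_delete(&oldpool);
	}
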
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 7f989898cbd9..299835d1fbaa 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -4858,6 +4858,7 @@ enum {
4858 ALC292_FIXUP_DISABLE_AAMIX, 4858 ALC292_FIXUP_DISABLE_AAMIX,
4859 ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK, 4859 ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK,
4860 ALC298_FIXUP_DELL1_MIC_NO_PRESENCE, 4860 ALC298_FIXUP_DELL1_MIC_NO_PRESENCE,
4861 ALC298_FIXUP_DELL_AIO_MIC_NO_PRESENCE,
4861 ALC275_FIXUP_DELL_XPS, 4862 ALC275_FIXUP_DELL_XPS,
4862 ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE, 4863 ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE,
4863 ALC293_FIXUP_LENOVO_SPK_NOISE, 4864 ALC293_FIXUP_LENOVO_SPK_NOISE,
@@ -5470,6 +5471,15 @@ static const struct hda_fixup alc269_fixups[] = {
5470 .chained = true, 5471 .chained = true,
5471 .chain_id = ALC269_FIXUP_HEADSET_MODE 5472 .chain_id = ALC269_FIXUP_HEADSET_MODE
5472 }, 5473 },
5474 [ALC298_FIXUP_DELL_AIO_MIC_NO_PRESENCE] = {
5475 .type = HDA_FIXUP_PINS,
5476 .v.pins = (const struct hda_pintbl[]) {
5477 { 0x18, 0x01a1913c }, /* use as headset mic, without its own jack detect */
5478 { }
5479 },
5480 .chained = true,
5481 .chain_id = ALC269_FIXUP_HEADSET_MODE
5482 },
5473 [ALC275_FIXUP_DELL_XPS] = { 5483 [ALC275_FIXUP_DELL_XPS] = {
5474 .type = HDA_FIXUP_VERBS, 5484 .type = HDA_FIXUP_VERBS,
5475 .v.verbs = (const struct hda_verb[]) { 5485 .v.verbs = (const struct hda_verb[]) {
@@ -5542,7 +5552,7 @@ static const struct hda_fixup alc269_fixups[] = {
5542 .type = HDA_FIXUP_FUNC, 5552 .type = HDA_FIXUP_FUNC,
5543 .v.func = alc298_fixup_speaker_volume, 5553 .v.func = alc298_fixup_speaker_volume,
5544 .chained = true, 5554 .chained = true,
5545 .chain_id = ALC298_FIXUP_DELL1_MIC_NO_PRESENCE, 5555 .chain_id = ALC298_FIXUP_DELL_AIO_MIC_NO_PRESENCE,
5546 }, 5556 },
5547 [ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER] = { 5557 [ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER] = {
5548 .type = HDA_FIXUP_PINS, 5558 .type = HDA_FIXUP_PINS,
diff --git a/sound/soc/atmel/atmel-classd.c b/sound/soc/atmel/atmel-classd.c
index 89ac5f5a93eb..7ae46c2647d4 100644
--- a/sound/soc/atmel/atmel-classd.c
+++ b/sound/soc/atmel/atmel-classd.c
@@ -349,7 +349,7 @@ static int atmel_classd_codec_dai_digital_mute(struct snd_soc_dai *codec_dai,
349} 349}
350 350
351#define CLASSD_ACLK_RATE_11M2896_MPY_8 (112896 * 100 * 8) 351#define CLASSD_ACLK_RATE_11M2896_MPY_8 (112896 * 100 * 8)
352#define CLASSD_ACLK_RATE_12M288_MPY_8 (12228 * 1000 * 8) 352#define CLASSD_ACLK_RATE_12M288_MPY_8 (12288 * 1000 * 8)
353 353
354static struct { 354static struct {
355 int rate; 355 int rate;
diff --git a/sound/soc/codecs/hdac_hdmi.c b/sound/soc/codecs/hdac_hdmi.c
index 78fca8acd3ec..fd272a40485b 100644
--- a/sound/soc/codecs/hdac_hdmi.c
+++ b/sound/soc/codecs/hdac_hdmi.c
@@ -1534,21 +1534,20 @@ static void hdac_hdmi_eld_notify_cb(void *aptr, int port, int pipe)
1534 pin->mst_capable = false; 1534 pin->mst_capable = false;
1535 /* if not MST, default is port[0] */ 1535 /* if not MST, default is port[0] */
1536 hport = &pin->ports[0]; 1536 hport = &pin->ports[0];
1537 goto out;
1538 } else { 1537 } else {
1539 for (i = 0; i < pin->num_ports; i++) { 1538 for (i = 0; i < pin->num_ports; i++) {
1540 pin->mst_capable = true; 1539 pin->mst_capable = true;
1541 if (pin->ports[i].id == pipe) { 1540 if (pin->ports[i].id == pipe) {
1542 hport = &pin->ports[i]; 1541 hport = &pin->ports[i];
1543 goto out; 1542 break;
1544 } 1543 }
1545 } 1544 }
1546 } 1545 }
1546
1547 if (hport)
1548 hdac_hdmi_present_sense(pin, hport);
1547 } 1549 }
1548 1550
1549out:
1550 if (pin && hport)
1551 hdac_hdmi_present_sense(pin, hport);
1552} 1551}
1553 1552
1554static struct i915_audio_component_audio_ops aops = { 1553static struct i915_audio_component_audio_ops aops = {
@@ -1998,7 +1997,7 @@ static int hdac_hdmi_dev_remove(struct hdac_ext_device *edev)
1998 struct hdac_hdmi_pin *pin, *pin_next; 1997 struct hdac_hdmi_pin *pin, *pin_next;
1999 struct hdac_hdmi_cvt *cvt, *cvt_next; 1998 struct hdac_hdmi_cvt *cvt, *cvt_next;
2000 struct hdac_hdmi_pcm *pcm, *pcm_next; 1999 struct hdac_hdmi_pcm *pcm, *pcm_next;
2001 struct hdac_hdmi_port *port; 2000 struct hdac_hdmi_port *port, *port_next;
2002 int i; 2001 int i;
2003 2002
2004 snd_soc_unregister_codec(&edev->hdac.dev); 2003 snd_soc_unregister_codec(&edev->hdac.dev);
@@ -2008,8 +2007,9 @@ static int hdac_hdmi_dev_remove(struct hdac_ext_device *edev)
2008 if (list_empty(&pcm->port_list)) 2007 if (list_empty(&pcm->port_list))
2009 continue; 2008 continue;
2010 2009
2011 list_for_each_entry(port, &pcm->port_list, head) 2010 list_for_each_entry_safe(port, port_next,
2012 port = NULL; 2011 &pcm->port_list, head)
2012 list_del(&port->head);
2013 2013
2014 list_del(&pcm->head); 2014 list_del(&pcm->head);
2015 kfree(pcm); 2015 kfree(pcm);
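
The old hdac_hdmi loop zeroed its cursor instead of unlinking anything, leaking the nodes' list membership; the replacement deletes while iterating, which requires the _safe variant because list_del() poisons the ->next pointer the plain iterator would chase. Generic illustration with made-up names:

	struct item {
		struct list_head head;
	};

	static void drain(struct list_head *list)
	{
		struct item *it, *tmp;

		list_for_each_entry_safe(it, tmp, list, head) {
			list_del(&it->head); /* safe: tmp caches the successor */
			/* kfree(it) here if the nodes are owned by this list */
		}
	}
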
diff --git a/sound/soc/codecs/rt5665.c b/sound/soc/codecs/rt5665.c
index 324461e985b3..476135ec5726 100644
--- a/sound/soc/codecs/rt5665.c
+++ b/sound/soc/codecs/rt5665.c
@@ -1241,7 +1241,7 @@ static irqreturn_t rt5665_irq(int irq, void *data)
1241static void rt5665_jd_check_handler(struct work_struct *work) 1241static void rt5665_jd_check_handler(struct work_struct *work)
1242{ 1242{
1243 struct rt5665_priv *rt5665 = container_of(work, struct rt5665_priv, 1243 struct rt5665_priv *rt5665 = container_of(work, struct rt5665_priv,
1244 calibrate_work.work); 1244 jd_check_work.work);
1245 1245
1246 if (snd_soc_read(rt5665->codec, RT5665_AJD1_CTRL) & 0x0010) { 1246 if (snd_soc_read(rt5665->codec, RT5665_AJD1_CTRL) & 0x0010) {
1247 /* jack out */ 1247 /* jack out */
@@ -2252,7 +2252,7 @@ static const char * const rt5665_if2_1_adc_in_src[] = {
2252 2252
2253static const SOC_ENUM_SINGLE_DECL( 2253static const SOC_ENUM_SINGLE_DECL(
2254 rt5665_if2_1_adc_in_enum, RT5665_DIG_INF2_DATA, 2254 rt5665_if2_1_adc_in_enum, RT5665_DIG_INF2_DATA,
2255 RT5665_IF3_ADC_IN_SFT, rt5665_if2_1_adc_in_src); 2255 RT5665_IF2_1_ADC_IN_SFT, rt5665_if2_1_adc_in_src);
2256 2256
2257static const struct snd_kcontrol_new rt5665_if2_1_adc_in_mux = 2257static const struct snd_kcontrol_new rt5665_if2_1_adc_in_mux =
2258 SOC_DAPM_ENUM("IF2_1 ADC IN Source", rt5665_if2_1_adc_in_enum); 2258 SOC_DAPM_ENUM("IF2_1 ADC IN Source", rt5665_if2_1_adc_in_enum);
@@ -3178,6 +3178,9 @@ static const struct snd_soc_dapm_route rt5665_dapm_routes[] = {
3178 {"DAC Mono Right Filter", NULL, "DAC Mono R ASRC", is_using_asrc}, 3178 {"DAC Mono Right Filter", NULL, "DAC Mono R ASRC", is_using_asrc},
3179 {"DAC Stereo1 Filter", NULL, "DAC STO1 ASRC", is_using_asrc}, 3179 {"DAC Stereo1 Filter", NULL, "DAC STO1 ASRC", is_using_asrc},
3180 {"DAC Stereo2 Filter", NULL, "DAC STO2 ASRC", is_using_asrc}, 3180 {"DAC Stereo2 Filter", NULL, "DAC STO2 ASRC", is_using_asrc},
3181 {"I2S1 ASRC", NULL, "CLKDET"},
3182 {"I2S2 ASRC", NULL, "CLKDET"},
3183 {"I2S3 ASRC", NULL, "CLKDET"},
3181 3184
3182 /*Vref*/ 3185 /*Vref*/
3183 {"Mic Det Power", NULL, "Vref2"}, 3186 {"Mic Det Power", NULL, "Vref2"},
@@ -3912,6 +3915,7 @@ static const struct snd_soc_dapm_route rt5665_dapm_routes[] = {
3912 {"Mono MIX", "MONOVOL Switch", "MONOVOL"}, 3915 {"Mono MIX", "MONOVOL Switch", "MONOVOL"},
3913 {"Mono Amp", NULL, "Mono MIX"}, 3916 {"Mono Amp", NULL, "Mono MIX"},
3914 {"Mono Amp", NULL, "Vref2"}, 3917 {"Mono Amp", NULL, "Vref2"},
3918 {"Mono Amp", NULL, "Vref3"},
3915 {"Mono Amp", NULL, "CLKDET SYS"}, 3919 {"Mono Amp", NULL, "CLKDET SYS"},
3916 {"Mono Amp", NULL, "CLKDET MONO"}, 3920 {"Mono Amp", NULL, "CLKDET MONO"},
3917 {"Mono Playback", "Switch", "Mono Amp"}, 3921 {"Mono Playback", "Switch", "Mono Amp"},
@@ -4798,7 +4802,7 @@ static int rt5665_i2c_probe(struct i2c_client *i2c,
4798 /* Enhance performance*/ 4802 /* Enhance performance*/
4799 regmap_update_bits(rt5665->regmap, RT5665_PWR_ANLG_1, 4803 regmap_update_bits(rt5665->regmap, RT5665_PWR_ANLG_1,
4800 RT5665_HP_DRIVER_MASK | RT5665_LDO1_DVO_MASK, 4804 RT5665_HP_DRIVER_MASK | RT5665_LDO1_DVO_MASK,
4801 RT5665_HP_DRIVER_5X | RT5665_LDO1_DVO_09); 4805 RT5665_HP_DRIVER_5X | RT5665_LDO1_DVO_12);
4802 4806
4803 INIT_DELAYED_WORK(&rt5665->jack_detect_work, 4807 INIT_DELAYED_WORK(&rt5665->jack_detect_work,
4804 rt5665_jack_detect_handler); 4808 rt5665_jack_detect_handler);
diff --git a/sound/soc/codecs/rt5665.h b/sound/soc/codecs/rt5665.h
index 12f7080a0d3c..a30f5e6d0628 100644
--- a/sound/soc/codecs/rt5665.h
+++ b/sound/soc/codecs/rt5665.h
@@ -1106,7 +1106,7 @@
1106#define RT5665_HP_DRIVER_MASK (0x3 << 2) 1106#define RT5665_HP_DRIVER_MASK (0x3 << 2)
1107#define RT5665_HP_DRIVER_1X (0x0 << 2) 1107#define RT5665_HP_DRIVER_1X (0x0 << 2)
1108#define RT5665_HP_DRIVER_3X (0x1 << 2) 1108#define RT5665_HP_DRIVER_3X (0x1 << 2)
1109#define RT5665_HP_DRIVER_5X (0x2 << 2) 1109#define RT5665_HP_DRIVER_5X (0x3 << 2)
1110#define RT5665_LDO1_DVO_MASK (0x3) 1110#define RT5665_LDO1_DVO_MASK (0x3)
1111#define RT5665_LDO1_DVO_09 (0x0) 1111#define RT5665_LDO1_DVO_09 (0x0)
1112#define RT5665_LDO1_DVO_10 (0x1) 1112#define RT5665_LDO1_DVO_10 (0x1)
diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c
index d151224ffcca..bbdb72f73df1 100644
--- a/sound/soc/codecs/wm_adsp.c
+++ b/sound/soc/codecs/wm_adsp.c
@@ -899,7 +899,10 @@ static int wm_coeff_put(struct snd_kcontrol *kctl,
899 899
900 mutex_lock(&ctl->dsp->pwr_lock); 900 mutex_lock(&ctl->dsp->pwr_lock);
901 901
902 memcpy(ctl->cache, p, ctl->len); 902 if (ctl->flags & WMFW_CTL_FLAG_VOLATILE)
903 ret = -EPERM;
904 else
905 memcpy(ctl->cache, p, ctl->len);
903 906
904 ctl->set = 1; 907 ctl->set = 1;
905 if (ctl->enabled && ctl->dsp->running) 908 if (ctl->enabled && ctl->dsp->running)
@@ -926,6 +929,8 @@ static int wm_coeff_tlv_put(struct snd_kcontrol *kctl,
926 ctl->set = 1; 929 ctl->set = 1;
927 if (ctl->enabled && ctl->dsp->running) 930 if (ctl->enabled && ctl->dsp->running)
928 ret = wm_coeff_write_control(ctl, ctl->cache, size); 931 ret = wm_coeff_write_control(ctl, ctl->cache, size);
932 else if (ctl->flags & WMFW_CTL_FLAG_VOLATILE)
933 ret = -EPERM;
929 } 934 }
930 935
931 mutex_unlock(&ctl->dsp->pwr_lock); 936 mutex_unlock(&ctl->dsp->pwr_lock);
@@ -947,7 +952,7 @@ static int wm_coeff_put_acked(struct snd_kcontrol *kctl,
947 952
948 mutex_lock(&ctl->dsp->pwr_lock); 953 mutex_lock(&ctl->dsp->pwr_lock);
949 954
950 if (ctl->enabled) 955 if (ctl->enabled && ctl->dsp->running)
951 ret = wm_coeff_write_acked_control(ctl, val); 956 ret = wm_coeff_write_acked_control(ctl, val);
952 else 957 else
953 ret = -EPERM; 958 ret = -EPERM;
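
Taken together, the three wm_adsp hunks converge on one gating rule for control writes. A simplified sketch of the put path (coeff_put_sketch() is hypothetical; per the hunks, volatile controls are firmware-owned and refuse user writes, others always cache and write through only with a live DSP):

	static int coeff_put_sketch(struct wm_coeff_ctl *ctl,
				    const void *p, size_t len)
	{
		if (ctl->flags & WMFW_CTL_FLAG_VOLATILE)
			return -EPERM;		/* firmware-owned control */

		memcpy(ctl->cache, p, len);	/* cache unconditionally  */
		ctl->set = 1;
		if (ctl->enabled && ctl->dsp->running)
			return wm_coeff_write_control(ctl, ctl->cache, len);

		return 0;			/* applied at next power-up */
	}
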
diff --git a/sound/soc/generic/simple-card-utils.c b/sound/soc/generic/simple-card-utils.c
index 4924575d2e95..343b291fc372 100644
--- a/sound/soc/generic/simple-card-utils.c
+++ b/sound/soc/generic/simple-card-utils.c
@@ -115,6 +115,7 @@ int asoc_simple_card_parse_clk(struct device *dev,
115 clk = devm_get_clk_from_child(dev, node, NULL); 115 clk = devm_get_clk_from_child(dev, node, NULL);
116 if (!IS_ERR(clk)) { 116 if (!IS_ERR(clk)) {
117 simple_dai->sysclk = clk_get_rate(clk); 117 simple_dai->sysclk = clk_get_rate(clk);
118 simple_dai->clk = clk;
118 } else if (!of_property_read_u32(node, "system-clock-frequency", &val)) { 119 } else if (!of_property_read_u32(node, "system-clock-frequency", &val)) {
119 simple_dai->sysclk = val; 120 simple_dai->sysclk = val;
120 } else { 121 } else {
diff --git a/sound/soc/intel/skylake/skl-topology.c b/sound/soc/intel/skylake/skl-topology.c
index ed58b5b3555a..2dbfb1b24ef4 100644
--- a/sound/soc/intel/skylake/skl-topology.c
+++ b/sound/soc/intel/skylake/skl-topology.c
@@ -512,7 +512,7 @@ static int skl_tplg_set_module_init_data(struct snd_soc_dapm_widget *w)
512 if (bc->set_params != SKL_PARAM_INIT) 512 if (bc->set_params != SKL_PARAM_INIT)
513 continue; 513 continue;
514 514
515 mconfig->formats_config.caps = (u32 *)&bc->params; 515 mconfig->formats_config.caps = (u32 *)bc->params;
516 mconfig->formats_config.caps_size = bc->size; 516 mconfig->formats_config.caps_size = bc->size;
517 517
518 break; 518 break;
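
The one-character skl-topology fix is a classic pointer bug: bc->params is itself a pointer, so &bc->params is the address of the pointer variable, and the old cast handed the firmware-config path that address rather than the payload it points to. Minimal reproduction with made-up names:

	struct algo_data_sketch {
		u32 size;
		char *params;			/* points at the binary blob */
	};

	static u32 *caps_of(struct algo_data_sketch *bc)
	{
		return (u32 *)bc->params;	/* correct: the blob itself */
		/* (u32 *)&bc->params would be the address of the pointer */
	}
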
diff --git a/sound/soc/mediatek/Kconfig b/sound/soc/mediatek/Kconfig
index 05cf809cf9e1..d7013bde6f45 100644
--- a/sound/soc/mediatek/Kconfig
+++ b/sound/soc/mediatek/Kconfig
@@ -13,7 +13,7 @@ config SND_SOC_MT2701
13 13
14config SND_SOC_MT2701_CS42448 14config SND_SOC_MT2701_CS42448
15 tristate "ASoc Audio driver for MT2701 with CS42448 codec" 15 tristate "ASoc Audio driver for MT2701 with CS42448 codec"
16 depends on SND_SOC_MT2701 16 depends on SND_SOC_MT2701 && I2C
17 select SND_SOC_CS42XX8_I2C 17 select SND_SOC_CS42XX8_I2C
18 select SND_SOC_BT_SCO 18 select SND_SOC_BT_SCO
19 help 19 help
diff --git a/sound/soc/sh/rcar/cmd.c b/sound/soc/sh/rcar/cmd.c
index abb5eaac854a..7d92a24b7cfa 100644
--- a/sound/soc/sh/rcar/cmd.c
+++ b/sound/soc/sh/rcar/cmd.c
@@ -31,23 +31,24 @@ static int rsnd_cmd_init(struct rsnd_mod *mod,
31 struct rsnd_mod *mix = rsnd_io_to_mod_mix(io); 31 struct rsnd_mod *mix = rsnd_io_to_mod_mix(io);
32 struct device *dev = rsnd_priv_to_dev(priv); 32 struct device *dev = rsnd_priv_to_dev(priv);
33 u32 data; 33 u32 data;
34 u32 path[] = {
35 [1] = 1 << 0,
36 [5] = 1 << 8,
37 [6] = 1 << 12,
38 [9] = 1 << 15,
39 };
34 40
35 if (!mix && !dvc) 41 if (!mix && !dvc)
36 return 0; 42 return 0;
37 43
44 if (ARRAY_SIZE(path) < rsnd_mod_id(mod) + 1)
45 return -ENXIO;
46
38 if (mix) { 47 if (mix) {
39 struct rsnd_dai *rdai; 48 struct rsnd_dai *rdai;
40 struct rsnd_mod *src; 49 struct rsnd_mod *src;
41 struct rsnd_dai_stream *tio; 50 struct rsnd_dai_stream *tio;
42 int i; 51 int i;
43 u32 path[] = {
44 [0] = 0,
45 [1] = 1 << 0,
46 [2] = 0,
47 [3] = 0,
48 [4] = 0,
49 [5] = 1 << 8
50 };
51 52
52 /* 53 /*
53 * it is assuming that integrater is well understanding about 54 * it is assuming that integrater is well understanding about
@@ -70,16 +71,19 @@ static int rsnd_cmd_init(struct rsnd_mod *mod,
70 } else { 71 } else {
71 struct rsnd_mod *src = rsnd_io_to_mod_src(io); 72 struct rsnd_mod *src = rsnd_io_to_mod_src(io);
72 73
73 u32 path[] = { 74 u8 cmd_case[] = {
74 [0] = 0x30000, 75 [0] = 0x3,
75 [1] = 0x30001, 76 [1] = 0x3,
76 [2] = 0x40000, 77 [2] = 0x4,
77 [3] = 0x10000, 78 [3] = 0x1,
78 [4] = 0x20000, 79 [4] = 0x2,
79 [5] = 0x40100 80 [5] = 0x4,
81 [6] = 0x1,
82 [9] = 0x2,
80 }; 83 };
81 84
82 data = path[rsnd_mod_id(src)]; 85 data = path[rsnd_mod_id(src)] |
86 cmd_case[rsnd_mod_id(src)] << 16;
83 } 87 }
84 88
85 dev_dbg(dev, "ctu/mix path = 0x%08x", data); 89 dev_dbg(dev, "ctu/mix path = 0x%08x", data);
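
The rcar cmd.c rework leans on two C idioms worth spelling out: sparse designated initializers leave the unnamed slots zeroed, and the new ARRAY_SIZE() comparison stops an out-of-range module id from indexing past the table. Isolated sketch; lookup_path() is hypothetical:

	static u32 lookup_path(int id)
	{
		static const u32 path[] = {
			[1] = 1 << 0,
			[5] = 1 << 8,
			[6] = 1 << 12,
			[9] = 1 << 15,	/* [0], [2]..[4], [7], [8] read as 0 */
		};

		if (id < 0 || id >= (int)ARRAY_SIZE(path))
			return 0;	/* the patch maps this to -ENXIO */
		return path[id];
	}
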
diff --git a/sound/soc/sh/rcar/dma.c b/sound/soc/sh/rcar/dma.c
index 1f405c833867..241cb3b08a07 100644
--- a/sound/soc/sh/rcar/dma.c
+++ b/sound/soc/sh/rcar/dma.c
@@ -454,6 +454,20 @@ static u32 rsnd_dmapp_read(struct rsnd_dma *dma, u32 reg)
454 return ioread32(rsnd_dmapp_addr(dmac, dma, reg)); 454 return ioread32(rsnd_dmapp_addr(dmac, dma, reg));
455} 455}
456 456
457static void rsnd_dmapp_bset(struct rsnd_dma *dma, u32 data, u32 mask, u32 reg)
458{
459 struct rsnd_mod *mod = rsnd_mod_get(dma);
460 struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
461 struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);
462 void __iomem *addr = rsnd_dmapp_addr(dmac, dma, reg);
463 u32 val = ioread32(addr);
464
465 val &= ~mask;
466 val |= (data & mask);
467
468 iowrite32(val, addr);
469}
470
457static int rsnd_dmapp_stop(struct rsnd_mod *mod, 471static int rsnd_dmapp_stop(struct rsnd_mod *mod,
458 struct rsnd_dai_stream *io, 472 struct rsnd_dai_stream *io,
459 struct rsnd_priv *priv) 473 struct rsnd_priv *priv)
@@ -461,10 +475,10 @@ static int rsnd_dmapp_stop(struct rsnd_mod *mod,
461 struct rsnd_dma *dma = rsnd_mod_to_dma(mod); 475 struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
462 int i; 476 int i;
463 477
464 rsnd_dmapp_write(dma, 0, PDMACHCR); 478 rsnd_dmapp_bset(dma, 0, PDMACHCR_DE, PDMACHCR);
465 479
466 for (i = 0; i < 1024; i++) { 480 for (i = 0; i < 1024; i++) {
467 if (0 == rsnd_dmapp_read(dma, PDMACHCR)) 481 if (0 == (rsnd_dmapp_read(dma, PDMACHCR) & PDMACHCR_DE))
468 return 0; 482 return 0;
469 udelay(1); 483 udelay(1);
470 } 484 }
diff --git a/sound/soc/sh/rcar/ssiu.c b/sound/soc/sh/rcar/ssiu.c
index 4e817c8a18c0..14fafdaf1395 100644
--- a/sound/soc/sh/rcar/ssiu.c
+++ b/sound/soc/sh/rcar/ssiu.c
@@ -64,7 +64,11 @@ static int rsnd_ssiu_init(struct rsnd_mod *mod,
64 mask1 = (1 << 4) | (1 << 20); /* mask sync bit */ 64 mask1 = (1 << 4) | (1 << 20); /* mask sync bit */
65 mask2 = (1 << 4); /* mask sync bit */ 65 mask2 = (1 << 4); /* mask sync bit */
66 val1 = val2 = 0; 66 val1 = val2 = 0;
67 if (rsnd_ssi_is_pin_sharing(io)) { 67 if (id == 8) {
68 /*
69 * SSI8 pin is sharing with SSI7, nothing to do.
70 */
71 } else if (rsnd_ssi_is_pin_sharing(io)) {
68 int shift = -1; 72 int shift = -1;
69 73
70 switch (id) { 74 switch (id) {
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index 6dca408faae3..2722bb0c5573 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -3326,7 +3326,10 @@ static int snd_soc_platform_drv_pcm_new(struct snd_soc_pcm_runtime *rtd)
3326{ 3326{
3327 struct snd_soc_platform *platform = rtd->platform; 3327 struct snd_soc_platform *platform = rtd->platform;
3328 3328
3329 return platform->driver->pcm_new(rtd); 3329 if (platform->driver->pcm_new)
3330 return platform->driver->pcm_new(rtd);
3331 else
3332 return 0;
3330} 3333}
3331 3334
3332static void snd_soc_platform_drv_pcm_free(struct snd_pcm *pcm) 3335static void snd_soc_platform_drv_pcm_free(struct snd_pcm *pcm)
@@ -3334,7 +3337,8 @@ static void snd_soc_platform_drv_pcm_free(struct snd_pcm *pcm)
3334 struct snd_soc_pcm_runtime *rtd = pcm->private_data; 3337 struct snd_soc_pcm_runtime *rtd = pcm->private_data;
3335 struct snd_soc_platform *platform = rtd->platform; 3338 struct snd_soc_platform *platform = rtd->platform;
3336 3339
3337 platform->driver->pcm_free(pcm); 3340 if (platform->driver->pcm_free)
3341 platform->driver->pcm_free(pcm);
3338} 3342}
3339 3343
3340/** 3344/**
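
Both soc-core hunks are the same defensive pattern: pcm_new and pcm_free are optional in platform drivers, so they are dereferenced only after a NULL check. The equivalent one-expression form, as a sketch:

	static int pcm_new_sketch(struct snd_soc_pcm_runtime *rtd)
	{
		struct snd_soc_platform *platform = rtd->platform;

		return platform->driver->pcm_new ?
		       platform->driver->pcm_new(rtd) : 0;
	}
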
diff --git a/sound/soc/sti/uniperif_reader.c b/sound/soc/sti/uniperif_reader.c
index 5992c6ab3833..93a8df6ed880 100644
--- a/sound/soc/sti/uniperif_reader.c
+++ b/sound/soc/sti/uniperif_reader.c
@@ -349,6 +349,8 @@ static int uni_reader_startup(struct snd_pcm_substream *substream,
349 struct uniperif *reader = priv->dai_data.uni; 349 struct uniperif *reader = priv->dai_data.uni;
350 int ret; 350 int ret;
351 351
352 reader->substream = substream;
353
352 if (!UNIPERIF_TYPE_IS_TDM(reader)) 354 if (!UNIPERIF_TYPE_IS_TDM(reader))
353 return 0; 355 return 0;
354 356
@@ -378,6 +380,7 @@ static void uni_reader_shutdown(struct snd_pcm_substream *substream,
378 /* Stop the reader */ 380 /* Stop the reader */
379 uni_reader_stop(reader); 381 uni_reader_stop(reader);
380 } 382 }
383 reader->substream = NULL;
381} 384}
382 385
383static const struct snd_soc_dai_ops uni_reader_dai_ops = { 386static const struct snd_soc_dai_ops uni_reader_dai_ops = {
diff --git a/sound/soc/sunxi/sun8i-codec.c b/sound/soc/sunxi/sun8i-codec.c
index b92bdc8361af..7527ba29a5a0 100644
--- a/sound/soc/sunxi/sun8i-codec.c
+++ b/sound/soc/sunxi/sun8i-codec.c
@@ -259,25 +259,20 @@ static int sun8i_codec_hw_params(struct snd_pcm_substream *substream,
259 return 0; 259 return 0;
260} 260}
261 261
262static const struct snd_kcontrol_new sun8i_output_left_mixer_controls[] = { 262static const struct snd_kcontrol_new sun8i_dac_mixer_controls[] = {
263 SOC_DAPM_SINGLE("LSlot 0", SUN8I_DAC_MXR_SRC, 263 SOC_DAPM_DOUBLE("AIF1 Slot 0 Digital DAC Playback Switch",
264 SUN8I_DAC_MXR_SRC_DACL_MXR_SRC_AIF1DA0L, 1, 0), 264 SUN8I_DAC_MXR_SRC,
265 SOC_DAPM_SINGLE("LSlot 1", SUN8I_DAC_MXR_SRC, 265 SUN8I_DAC_MXR_SRC_DACL_MXR_SRC_AIF1DA0L,
266 SUN8I_DAC_MXR_SRC_DACL_MXR_SRC_AIF1DA1L, 1, 0),
267 SOC_DAPM_SINGLE("DACL", SUN8I_DAC_MXR_SRC,
268 SUN8I_DAC_MXR_SRC_DACL_MXR_SRC_AIF2DACL, 1, 0),
269 SOC_DAPM_SINGLE("ADCL", SUN8I_DAC_MXR_SRC,
270 SUN8I_DAC_MXR_SRC_DACL_MXR_SRC_ADCL, 1, 0),
271};
272
273static const struct snd_kcontrol_new sun8i_output_right_mixer_controls[] = {
274 SOC_DAPM_SINGLE("RSlot 0", SUN8I_DAC_MXR_SRC,
275 SUN8I_DAC_MXR_SRC_DACR_MXR_SRC_AIF1DA0R, 1, 0), 266 SUN8I_DAC_MXR_SRC_DACR_MXR_SRC_AIF1DA0R, 1, 0),
276 SOC_DAPM_SINGLE("RSlot 1", SUN8I_DAC_MXR_SRC, 267 SOC_DAPM_DOUBLE("AIF1 Slot 1 Digital DAC Playback Switch",
268 SUN8I_DAC_MXR_SRC,
269 SUN8I_DAC_MXR_SRC_DACL_MXR_SRC_AIF1DA1L,
277 SUN8I_DAC_MXR_SRC_DACR_MXR_SRC_AIF1DA1R, 1, 0), 270 SUN8I_DAC_MXR_SRC_DACR_MXR_SRC_AIF1DA1R, 1, 0),
278 SOC_DAPM_SINGLE("DACR", SUN8I_DAC_MXR_SRC, 271 SOC_DAPM_DOUBLE("AIF2 Digital DAC Playback Switch", SUN8I_DAC_MXR_SRC,
272 SUN8I_DAC_MXR_SRC_DACL_MXR_SRC_AIF2DACL,
279 SUN8I_DAC_MXR_SRC_DACR_MXR_SRC_AIF2DACR, 1, 0), 273 SUN8I_DAC_MXR_SRC_DACR_MXR_SRC_AIF2DACR, 1, 0),
280 SOC_DAPM_SINGLE("ADCR", SUN8I_DAC_MXR_SRC, 274 SOC_DAPM_DOUBLE("ADC Digital DAC Playback Switch", SUN8I_DAC_MXR_SRC,
275 SUN8I_DAC_MXR_SRC_DACL_MXR_SRC_ADCL,
281 SUN8I_DAC_MXR_SRC_DACR_MXR_SRC_ADCR, 1, 0), 276 SUN8I_DAC_MXR_SRC_DACR_MXR_SRC_ADCR, 1, 0),
282}; 277};
283 278
@@ -286,19 +281,21 @@ static const struct snd_soc_dapm_widget sun8i_codec_dapm_widgets[] = {
286 SND_SOC_DAPM_SUPPLY("DAC", SUN8I_DAC_DIG_CTRL, SUN8I_DAC_DIG_CTRL_ENDA, 281 SND_SOC_DAPM_SUPPLY("DAC", SUN8I_DAC_DIG_CTRL, SUN8I_DAC_DIG_CTRL_ENDA,
287 0, NULL, 0), 282 0, NULL, 0),
288 283
289 /* Analog DAC */ 284 /* Analog DAC AIF */
290 SND_SOC_DAPM_DAC("Digital Left DAC", "Playback", SUN8I_AIF1_DACDAT_CTRL, 285 SND_SOC_DAPM_AIF_IN("AIF1 Slot 0 Left", "Playback", 0,
291 SUN8I_AIF1_DACDAT_CTRL_AIF1_DA0L_ENA, 0), 286 SUN8I_AIF1_DACDAT_CTRL,
292 SND_SOC_DAPM_DAC("Digital Right DAC", "Playback", SUN8I_AIF1_DACDAT_CTRL, 287 SUN8I_AIF1_DACDAT_CTRL_AIF1_DA0L_ENA, 0),
293 SUN8I_AIF1_DACDAT_CTRL_AIF1_DA0R_ENA, 0), 288 SND_SOC_DAPM_AIF_IN("AIF1 Slot 0 Right", "Playback", 0,
289 SUN8I_AIF1_DACDAT_CTRL,
290 SUN8I_AIF1_DACDAT_CTRL_AIF1_DA0R_ENA, 0),
294 291
295 /* DAC Mixers */ 292 /* DAC Mixers */
296 SND_SOC_DAPM_MIXER("Left DAC Mixer", SND_SOC_NOPM, 0, 0, 293 SND_SOC_DAPM_MIXER("Left Digital DAC Mixer", SND_SOC_NOPM, 0, 0,
297 sun8i_output_left_mixer_controls, 294 sun8i_dac_mixer_controls,
298 ARRAY_SIZE(sun8i_output_left_mixer_controls)), 295 ARRAY_SIZE(sun8i_dac_mixer_controls)),
299 SND_SOC_DAPM_MIXER("Right DAC Mixer", SND_SOC_NOPM, 0, 0, 296 SND_SOC_DAPM_MIXER("Right Digital DAC Mixer", SND_SOC_NOPM, 0, 0,
300 sun8i_output_right_mixer_controls, 297 sun8i_dac_mixer_controls,
301 ARRAY_SIZE(sun8i_output_right_mixer_controls)), 298 ARRAY_SIZE(sun8i_dac_mixer_controls)),
302 299
303 /* Clocks */ 300 /* Clocks */
304 SND_SOC_DAPM_SUPPLY("MODCLK AFI1", SUN8I_MOD_CLK_ENA, 301 SND_SOC_DAPM_SUPPLY("MODCLK AFI1", SUN8I_MOD_CLK_ENA,
@@ -321,8 +318,6 @@ static const struct snd_soc_dapm_widget sun8i_codec_dapm_widgets[] = {
321 SUN8I_MOD_RST_CTL_AIF1, 0, NULL, 0), 318 SUN8I_MOD_RST_CTL_AIF1, 0, NULL, 0),
322 SND_SOC_DAPM_SUPPLY("RST DAC", SUN8I_MOD_RST_CTL, 319 SND_SOC_DAPM_SUPPLY("RST DAC", SUN8I_MOD_RST_CTL,
323 SUN8I_MOD_RST_CTL_DAC, 0, NULL, 0), 320 SUN8I_MOD_RST_CTL_DAC, 0, NULL, 0),
324
325 SND_SOC_DAPM_OUTPUT("HP"),
326}; 321};
327 322
328static const struct snd_soc_dapm_route sun8i_codec_dapm_routes[] = { 323static const struct snd_soc_dapm_route sun8i_codec_dapm_routes[] = {
@@ -338,16 +333,14 @@ static const struct snd_soc_dapm_route sun8i_codec_dapm_routes[] = {
338 { "DAC", NULL, "MODCLK DAC" }, 333 { "DAC", NULL, "MODCLK DAC" },
339 334
340 /* DAC Routes */ 335 /* DAC Routes */
341 { "Digital Left DAC", NULL, "DAC" }, 336 { "AIF1 Slot 0 Right", NULL, "DAC" },
342 { "Digital Right DAC", NULL, "DAC" }, 337 { "AIF1 Slot 0 Left", NULL, "DAC" },
343 338
344 /* DAC Mixer Routes */ 339 /* DAC Mixer Routes */
345 { "Left DAC Mixer", "LSlot 0", "Digital Left DAC"}, 340 { "Left Digital DAC Mixer", "AIF1 Slot 0 Digital DAC Playback Switch",
346 { "Right DAC Mixer", "RSlot 0", "Digital Right DAC"}, 341 "AIF1 Slot 0 Left"},
347 342 { "Right Digital DAC Mixer", "AIF1 Slot 0 Digital DAC Playback Switch",
348 /* End of route : HP out */ 343 "AIF1 Slot 0 Right"},
349 { "HP", NULL, "Left DAC Mixer" },
350 { "HP", NULL, "Right DAC Mixer" },
351}; 344};
352 345
353static struct snd_soc_dai_ops sun8i_codec_dai_ops = { 346static struct snd_soc_dai_ops sun8i_codec_dai_ops = {
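
The sun8i consolidation works because SOC_DAPM_DOUBLE packs a left and a right register bit into one named control, so a single set of "... Playback Switch" entries replaces the per-channel SOC_DAPM_SINGLE pairs. Shape of the macro as used above, with hypothetical EX_* names:

	/* SOC_DAPM_DOUBLE(xname, reg, shift_left, shift_right, max, invert) */
	static const struct snd_kcontrol_new example_mixer_controls[] = {
		SOC_DAPM_DOUBLE("Example Playback Switch", EX_MXR_REG,
				EX_LEFT_SHIFT, EX_RIGHT_SHIFT, 1, 0),
	};
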
diff --git a/tools/include/linux/filter.h b/tools/include/linux/filter.h
index 122153b16ea4..390d7c9685fd 100644
--- a/tools/include/linux/filter.h
+++ b/tools/include/linux/filter.h
@@ -168,6 +168,16 @@
168 .off = OFF, \ 168 .off = OFF, \
169 .imm = 0 }) 169 .imm = 0 })
170 170
171/* Atomic memory add, *(uint *)(dst_reg + off16) += src_reg */
172
173#define BPF_STX_XADD(SIZE, DST, SRC, OFF) \
174 ((struct bpf_insn) { \
175 .code = BPF_STX | BPF_SIZE(SIZE) | BPF_XADD, \
176 .dst_reg = DST, \
177 .src_reg = SRC, \
178 .off = OFF, \
179 .imm = 0 })
180
171/* Memory store, *(uint *) (dst_reg + off16) = imm32 */ 181/* Memory store, *(uint *) (dst_reg + off16) = imm32 */
172 182
173#define BPF_ST_MEM(SIZE, DST, OFF, IMM) \ 183#define BPF_ST_MEM(SIZE, DST, OFF, IMM) \
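
A hypothetical use of the new macro, matching the shape the xadd spill test added below exercises: seed a 64-bit stack slot, then atomically add a register into it.

	struct bpf_insn xadd_example[] = {
		BPF_MOV64_IMM(BPF_REG_5, 1),
		BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_5, -8),	 /* *(fp-8) = 1  */
		BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_5, -8), /* *(fp-8) += 1 */
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	};
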
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index 273f21fa32b5..7aa57225cbf7 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -130,6 +130,12 @@ static struct arch architectures[] = {
130 .name = "powerpc", 130 .name = "powerpc",
131 .init = powerpc__annotate_init, 131 .init = powerpc__annotate_init,
132 }, 132 },
133 {
134 .name = "s390",
135 .objdump = {
136 .comment_char = '#',
137 },
138 },
133}; 139};
134 140
135static void ins__delete(struct ins_operands *ops) 141static void ins__delete(struct ins_operands *ops)
diff --git a/tools/power/cpupower/utils/helpers/cpuid.c b/tools/power/cpupower/utils/helpers/cpuid.c
index 93b0aa74ca03..39c2c7d067bb 100644
--- a/tools/power/cpupower/utils/helpers/cpuid.c
+++ b/tools/power/cpupower/utils/helpers/cpuid.c
@@ -156,6 +156,7 @@ out:
156 */ 156 */
157 case 0x2C: /* Westmere EP - Gulftown */ 157 case 0x2C: /* Westmere EP - Gulftown */
158 cpu_info->caps |= CPUPOWER_CAP_HAS_TURBO_RATIO; 158 cpu_info->caps |= CPUPOWER_CAP_HAS_TURBO_RATIO;
159 break;
159 case 0x2A: /* SNB */ 160 case 0x2A: /* SNB */
160 case 0x2D: /* SNB Xeon */ 161 case 0x2D: /* SNB Xeon */
161 case 0x3A: /* IVB */ 162 case 0x3A: /* IVB */
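
The added break fixes a switch fall-through: without it, the Westmere-EP case continued into the Sandy Bridge block and picked up capability bits meant for later models. The bug class in miniature (detect_caps() and the flag values are made up):

	static unsigned int detect_caps(unsigned int model)
	{
		unsigned int caps = 0;

		switch (model) {
		case 0x2C:		/* Westmere EP */
			caps |= 0x1;	/* turbo-ratio capability */
			break;		/* the added statement */
		case 0x2A:		/* SNB */
		case 0x2D:		/* SNB Xeon */
			caps |= 0x2;	/* must not apply to 0x2C */
			break;
		}
		return caps;
	}
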
diff --git a/tools/power/x86/turbostat/turbostat.8 b/tools/power/x86/turbostat/turbostat.8
index fedca3285326..ccf2a69365cc 100644
--- a/tools/power/x86/turbostat/turbostat.8
+++ b/tools/power/x86/turbostat/turbostat.8
@@ -100,6 +100,8 @@ The system configuration dump (if --quiet is not used) is followed by statistics
100\fBCPU%c1, CPU%c3, CPU%c6, CPU%c7\fP show the percentage residency in hardware core idle states. These numbers are from hardware residency counters. 100\fBCPU%c1, CPU%c3, CPU%c6, CPU%c7\fP show the percentage residency in hardware core idle states. These numbers are from hardware residency counters.
101\fBCoreTmp\fP Degrees Celsius reported by the per-core Digital Thermal Sensor. 101\fBCoreTmp\fP Degrees Celsius reported by the per-core Digital Thermal Sensor.
102\fBPkgTtmp\fP Degrees Celsius reported by the per-package Package Thermal Monitor. 102\fBPkgTtmp\fP Degrees Celsius reported by the per-package Package Thermal Monitor.
103\fBGFX%rc6\fP The percentage of time the GPU is in the "render C6" state, rc6, during the measurement interval. From /sys/class/drm/card0/power/rc6_residency_ms.
104\fBGFXMHz\fP Instantaneous snapshot of what sysfs presents at the end of the measurement interval. From /sys/class/graphics/fb0/device/drm/card0/gt_cur_freq_mhz.
103\fBPkg%pc2, Pkg%pc3, Pkg%pc6, Pkg%pc7\fP percentage residency in hardware package idle states. These numbers are from hardware residency counters. 105\fBPkg%pc2, Pkg%pc3, Pkg%pc6, Pkg%pc7\fP percentage residency in hardware package idle states. These numbers are from hardware residency counters.
104\fBPkgWatt\fP Watts consumed by the whole package. 106\fBPkgWatt\fP Watts consumed by the whole package.
105\fBCorWatt\fP Watts consumed by the core part of the package. 107\fBCorWatt\fP Watts consumed by the core part of the package.
diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
index 828dccd3f01e..b11294730771 100644
--- a/tools/power/x86/turbostat/turbostat.c
+++ b/tools/power/x86/turbostat/turbostat.c
@@ -1142,7 +1142,7 @@ delta_thread(struct thread_data *new, struct thread_data *old,
1142 * it is possible for mperf's non-halted cycles + idle states 1142 * it is possible for mperf's non-halted cycles + idle states
1143 * to exceed TSC's all cycles: show c1 = 0% in that case. 1143 * to exceed TSC's all cycles: show c1 = 0% in that case.
1144 */ 1144 */
1145 if ((old->mperf + core_delta->c3 + core_delta->c6 + core_delta->c7) > old->tsc) 1145 if ((old->mperf + core_delta->c3 + core_delta->c6 + core_delta->c7) > (old->tsc * tsc_tweak))
1146 old->c1 = 0; 1146 old->c1 = 0;
1147 else { 1147 else {
1148 /* normal case, derive c1 */ 1148 /* normal case, derive c1 */
@@ -2485,8 +2485,10 @@ int snapshot_gfx_mhz(void)
2485 2485
2486 if (fp == NULL) 2486 if (fp == NULL)
2487 fp = fopen_or_die("/sys/class/graphics/fb0/device/drm/card0/gt_cur_freq_mhz", "r"); 2487 fp = fopen_or_die("/sys/class/graphics/fb0/device/drm/card0/gt_cur_freq_mhz", "r");
2488 else 2488 else {
2489 rewind(fp); 2489 rewind(fp);
2490 fflush(fp);
2491 }
2490 2492
2491 retval = fscanf(fp, "%d", &gfx_cur_mhz); 2493 retval = fscanf(fp, "%d", &gfx_cur_mhz);
2492 if (retval != 1) 2494 if (retval != 1)
@@ -3111,7 +3113,7 @@ int print_hwp(struct thread_data *t, struct core_data *c, struct pkg_data *p)
3111 return 0; 3113 return 0;
3112 3114
3113 fprintf(outf, "cpu%d: MSR_HWP_CAPABILITIES: 0x%08llx " 3115 fprintf(outf, "cpu%d: MSR_HWP_CAPABILITIES: 0x%08llx "
3114 "(high 0x%x guar 0x%x eff 0x%x low 0x%x)\n", 3116 "(high %d guar %d eff %d low %d)\n",
3115 cpu, msr, 3117 cpu, msr,
3116 (unsigned int)HWP_HIGHEST_PERF(msr), 3118 (unsigned int)HWP_HIGHEST_PERF(msr),
3117 (unsigned int)HWP_GUARANTEED_PERF(msr), 3119 (unsigned int)HWP_GUARANTEED_PERF(msr),
@@ -3122,7 +3124,7 @@ int print_hwp(struct thread_data *t, struct core_data *c, struct pkg_data *p)
3122 return 0; 3124 return 0;
3123 3125
3124 fprintf(outf, "cpu%d: MSR_HWP_REQUEST: 0x%08llx " 3126 fprintf(outf, "cpu%d: MSR_HWP_REQUEST: 0x%08llx "
3125 "(min 0x%x max 0x%x des 0x%x epp 0x%x window 0x%x pkg 0x%x)\n", 3127 "(min %d max %d des %d epp 0x%x window 0x%x pkg 0x%x)\n",
3126 cpu, msr, 3128 cpu, msr,
3127 (unsigned int)(((msr) >> 0) & 0xff), 3129 (unsigned int)(((msr) >> 0) & 0xff),
3128 (unsigned int)(((msr) >> 8) & 0xff), 3130 (unsigned int)(((msr) >> 8) & 0xff),
@@ -3136,7 +3138,7 @@ int print_hwp(struct thread_data *t, struct core_data *c, struct pkg_data *p)
3136 return 0; 3138 return 0;
3137 3139
3138 fprintf(outf, "cpu%d: MSR_HWP_REQUEST_PKG: 0x%08llx " 3140 fprintf(outf, "cpu%d: MSR_HWP_REQUEST_PKG: 0x%08llx "
3139 "(min 0x%x max 0x%x des 0x%x epp 0x%x window 0x%x)\n", 3141 "(min %d max %d des %d epp 0x%x window 0x%x)\n",
3140 cpu, msr, 3142 cpu, msr,
3141 (unsigned int)(((msr) >> 0) & 0xff), 3143 (unsigned int)(((msr) >> 0) & 0xff),
3142 (unsigned int)(((msr) >> 8) & 0xff), 3144 (unsigned int)(((msr) >> 8) & 0xff),
@@ -3353,17 +3355,19 @@ void rapl_probe(unsigned int family, unsigned int model)
3353 case INTEL_FAM6_SKYLAKE_DESKTOP: /* SKL */ 3355 case INTEL_FAM6_SKYLAKE_DESKTOP: /* SKL */
3354 case INTEL_FAM6_KABYLAKE_MOBILE: /* KBL */ 3356 case INTEL_FAM6_KABYLAKE_MOBILE: /* KBL */
3355 case INTEL_FAM6_KABYLAKE_DESKTOP: /* KBL */ 3357 case INTEL_FAM6_KABYLAKE_DESKTOP: /* KBL */
3356 do_rapl = RAPL_PKG | RAPL_DRAM | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | RAPL_PKG_POWER_INFO; 3358 do_rapl = RAPL_PKG | RAPL_CORES | RAPL_CORE_POLICY | RAPL_DRAM | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | RAPL_GFX | RAPL_PKG_POWER_INFO;
3357 BIC_PRESENT(BIC_PKG__); 3359 BIC_PRESENT(BIC_PKG__);
3358 BIC_PRESENT(BIC_RAM__); 3360 BIC_PRESENT(BIC_RAM__);
3359 if (rapl_joules) { 3361 if (rapl_joules) {
3360 BIC_PRESENT(BIC_Pkg_J); 3362 BIC_PRESENT(BIC_Pkg_J);
3361 BIC_PRESENT(BIC_Cor_J); 3363 BIC_PRESENT(BIC_Cor_J);
3362 BIC_PRESENT(BIC_RAM_J); 3364 BIC_PRESENT(BIC_RAM_J);
3365 BIC_PRESENT(BIC_GFX_J);
3363 } else { 3366 } else {
3364 BIC_PRESENT(BIC_PkgWatt); 3367 BIC_PRESENT(BIC_PkgWatt);
3365 BIC_PRESENT(BIC_CorWatt); 3368 BIC_PRESENT(BIC_CorWatt);
3366 BIC_PRESENT(BIC_RAMWatt); 3369 BIC_PRESENT(BIC_RAMWatt);
3370 BIC_PRESENT(BIC_GFXWatt);
3367 } 3371 }
3368 break; 3372 break;
3369 case INTEL_FAM6_HASWELL_X: /* HSX */ 3373 case INTEL_FAM6_HASWELL_X: /* HSX */
@@ -3478,7 +3482,7 @@ void perf_limit_reasons_probe(unsigned int family, unsigned int model)
3478int print_thermal(struct thread_data *t, struct core_data *c, struct pkg_data *p) 3482int print_thermal(struct thread_data *t, struct core_data *c, struct pkg_data *p)
3479{ 3483{
3480 unsigned long long msr; 3484 unsigned long long msr;
3481 unsigned int dts; 3485 unsigned int dts, dts2;
3482 int cpu; 3486 int cpu;
3483 3487
3484 if (!(do_dts || do_ptm)) 3488 if (!(do_dts || do_ptm))
@@ -3503,7 +3507,6 @@ int print_thermal(struct thread_data *t, struct core_data *c, struct pkg_data *p
3503 fprintf(outf, "cpu%d: MSR_IA32_PACKAGE_THERM_STATUS: 0x%08llx (%d C)\n", 3507 fprintf(outf, "cpu%d: MSR_IA32_PACKAGE_THERM_STATUS: 0x%08llx (%d C)\n",
3504 cpu, msr, tcc_activation_temp - dts); 3508 cpu, msr, tcc_activation_temp - dts);
3505 3509
3506#ifdef THERM_DEBUG
3507 if (get_msr(cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT, &msr)) 3510 if (get_msr(cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT, &msr))
3508 return 0; 3511 return 0;
3509 3512
@@ -3511,11 +3514,10 @@ int print_thermal(struct thread_data *t, struct core_data *c, struct pkg_data *p
3511 dts2 = (msr >> 8) & 0x7F; 3514 dts2 = (msr >> 8) & 0x7F;
3512 fprintf(outf, "cpu%d: MSR_IA32_PACKAGE_THERM_INTERRUPT: 0x%08llx (%d C, %d C)\n", 3515 fprintf(outf, "cpu%d: MSR_IA32_PACKAGE_THERM_INTERRUPT: 0x%08llx (%d C, %d C)\n",
3513 cpu, msr, tcc_activation_temp - dts, tcc_activation_temp - dts2); 3516 cpu, msr, tcc_activation_temp - dts, tcc_activation_temp - dts2);
3514#endif
3515 } 3517 }
3516 3518
3517 3519
3518 if (do_dts) { 3520 if (do_dts && debug) {
3519 unsigned int resolution; 3521 unsigned int resolution;
3520 3522
3521 if (get_msr(cpu, MSR_IA32_THERM_STATUS, &msr)) 3523 if (get_msr(cpu, MSR_IA32_THERM_STATUS, &msr))
@@ -3526,7 +3528,6 @@ int print_thermal(struct thread_data *t, struct core_data *c, struct pkg_data *p
3526 fprintf(outf, "cpu%d: MSR_IA32_THERM_STATUS: 0x%08llx (%d C +/- %d)\n", 3528 fprintf(outf, "cpu%d: MSR_IA32_THERM_STATUS: 0x%08llx (%d C +/- %d)\n",
3527 cpu, msr, tcc_activation_temp - dts, resolution); 3529 cpu, msr, tcc_activation_temp - dts, resolution);
3528 3530
3529#ifdef THERM_DEBUG
3530 if (get_msr(cpu, MSR_IA32_THERM_INTERRUPT, &msr)) 3531 if (get_msr(cpu, MSR_IA32_THERM_INTERRUPT, &msr))
3531 return 0; 3532 return 0;
3532 3533
@@ -3534,7 +3535,6 @@ int print_thermal(struct thread_data *t, struct core_data *c, struct pkg_data *p
3534 dts2 = (msr >> 8) & 0x7F; 3535 dts2 = (msr >> 8) & 0x7F;
3535 fprintf(outf, "cpu%d: MSR_IA32_THERM_INTERRUPT: 0x%08llx (%d C, %d C)\n", 3536 fprintf(outf, "cpu%d: MSR_IA32_THERM_INTERRUPT: 0x%08llx (%d C, %d C)\n",
3536 cpu, msr, tcc_activation_temp - dts, tcc_activation_temp - dts2); 3537 cpu, msr, tcc_activation_temp - dts, tcc_activation_temp - dts2);
3537#endif
3538 } 3538 }
3539 3539
3540 return 0; 3540 return 0;
@@ -4578,7 +4578,7 @@ int get_and_dump_counters(void)
4578} 4578}
4579 4579
4580void print_version() { 4580void print_version() {
4581 fprintf(outf, "turbostat version 17.02.24" 4581 fprintf(outf, "turbostat version 17.04.12"
4582 " - Len Brown <lenb@kernel.org>\n"); 4582 " - Len Brown <lenb@kernel.org>\n");
4583} 4583}
4584 4584
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index 6a1ad58cb66f..9af09e8099c0 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -1,7 +1,14 @@
1LIBDIR := ../../../lib 1LIBDIR := ../../../lib
2BPFDIR := $(LIBDIR)/bpf 2BPFDIR := $(LIBDIR)/bpf
3APIDIR := ../../../include/uapi
4GENDIR := ../../../../include/generated
5GENHDR := $(GENDIR)/autoconf.h
3 6
4CFLAGS += -Wall -O2 -I../../../include/uapi -I$(LIBDIR) 7ifneq ($(wildcard $(GENHDR)),)
8 GENFLAGS := -DHAVE_GENHDR
9endif
10
11CFLAGS += -Wall -O2 -I$(APIDIR) -I$(LIBDIR) -I$(GENDIR) $(GENFLAGS)
5LDLIBS += -lcap 12LDLIBS += -lcap
6 13
7TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map 14TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index d1555e4240c0..c848e90b6421 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -30,6 +30,14 @@
30 30
31#include <bpf/bpf.h> 31#include <bpf/bpf.h>
32 32
33#ifdef HAVE_GENHDR
34# include "autoconf.h"
35#else
36# if defined(__i386) || defined(__x86_64) || defined(__s390x__) || defined(__aarch64__)
37# define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1
38# endif
39#endif
40
33#include "../../../include/linux/filter.h" 41#include "../../../include/linux/filter.h"
34 42
35#ifndef ARRAY_SIZE 43#ifndef ARRAY_SIZE
@@ -39,6 +47,8 @@
39#define MAX_INSNS 512 47#define MAX_INSNS 512
40#define MAX_FIXUPS 8 48#define MAX_FIXUPS 8
41 49
50#define F_NEEDS_EFFICIENT_UNALIGNED_ACCESS (1 << 0)
51
42struct bpf_test { 52struct bpf_test {
43 const char *descr; 53 const char *descr;
44 struct bpf_insn insns[MAX_INSNS]; 54 struct bpf_insn insns[MAX_INSNS];
@@ -53,6 +63,7 @@ struct bpf_test {
53 REJECT 63 REJECT
54 } result, result_unpriv; 64 } result, result_unpriv;
55 enum bpf_prog_type prog_type; 65 enum bpf_prog_type prog_type;
66 uint8_t flags;
56}; 67};
57 68
58/* Note we want this to be 64 bit aligned so that the end of our array is 69/* Note we want this to be 64 bit aligned so that the end of our array is
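
The new flags field lets individual tests declare that they rely on efficient unaligned access; the runner side is not visible in these hunks, but presumably it adjusts the expected verdict on strict-alignment architectures. A sketch of that consumption, under that assumption only:

	/* Hypothetical runner logic; the real change is outside these hunks. */
	static int expected_verdict(const struct bpf_test *test)
	{
	#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
		if (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS)
			return REJECT;	/* strict alignment: verifier rejects */
	#endif
		return test->result;
	}
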
@@ -2432,6 +2443,30 @@ static struct bpf_test tests[] = {
2432 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 2443 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2433 }, 2444 },
2434 { 2445 {
2446 "direct packet access: test15 (spill with xadd)",
2447 .insns = {
2448 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2449 offsetof(struct __sk_buff, data)),
2450 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2451 offsetof(struct __sk_buff, data_end)),
2452 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2453 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2454 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
2455 BPF_MOV64_IMM(BPF_REG_5, 4096),
2456 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
2457 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
2458 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
2459 BPF_STX_XADD(BPF_DW, BPF_REG_4, BPF_REG_5, 0),
2460 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
2461 BPF_STX_MEM(BPF_W, BPF_REG_2, BPF_REG_5, 0),
2462 BPF_MOV64_IMM(BPF_REG_0, 0),
2463 BPF_EXIT_INSN(),
2464 },
2465 .errstr = "R2 invalid mem access 'inv'",
2466 .result = REJECT,
2467 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2468 },
2469 {
2435 "helper access to packet: test1, valid packet_ptr range", 2470 "helper access to packet: test1, valid packet_ptr range",
2436 .insns = { 2471 .insns = {
2437 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 2472 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
@@ -2934,6 +2969,7 @@ static struct bpf_test tests[] = {
2934 .errstr_unpriv = "R0 pointer arithmetic prohibited", 2969 .errstr_unpriv = "R0 pointer arithmetic prohibited",
2935 .result_unpriv = REJECT, 2970 .result_unpriv = REJECT,
2936 .result = ACCEPT, 2971 .result = ACCEPT,
2972 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
2937 }, 2973 },
2938 { 2974 {
2939 "valid map access into an array with a variable", 2975 "valid map access into an array with a variable",
@@ -2957,6 +2993,7 @@ static struct bpf_test tests[] = {
2957 .errstr_unpriv = "R0 pointer arithmetic prohibited", 2993 .errstr_unpriv = "R0 pointer arithmetic prohibited",
2958 .result_unpriv = REJECT, 2994 .result_unpriv = REJECT,
2959 .result = ACCEPT, 2995 .result = ACCEPT,
2996 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
2960 }, 2997 },
2961 { 2998 {
2962 "valid map access into an array with a signed variable", 2999 "valid map access into an array with a signed variable",
@@ -2984,6 +3021,7 @@ static struct bpf_test tests[] = {
2984 .errstr_unpriv = "R0 pointer arithmetic prohibited", 3021 .errstr_unpriv = "R0 pointer arithmetic prohibited",
2985 .result_unpriv = REJECT, 3022 .result_unpriv = REJECT,
2986 .result = ACCEPT, 3023 .result = ACCEPT,
3024 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
2987 }, 3025 },
2988 { 3026 {
2989 "invalid map access into an array with a constant", 3027 "invalid map access into an array with a constant",
@@ -3025,6 +3063,7 @@ static struct bpf_test tests[] = {
3025 .errstr = "R0 min value is outside of the array range", 3063 .errstr = "R0 min value is outside of the array range",
3026 .result_unpriv = REJECT, 3064 .result_unpriv = REJECT,
3027 .result = REJECT, 3065 .result = REJECT,
3066 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3028 }, 3067 },
3029 { 3068 {
3030 "invalid map access into an array with a variable", 3069 "invalid map access into an array with a variable",
@@ -3048,6 +3087,7 @@ static struct bpf_test tests[] = {
3048 .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.", 3087 .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
3049 .result_unpriv = REJECT, 3088 .result_unpriv = REJECT,
3050 .result = REJECT, 3089 .result = REJECT,
3090 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3051 }, 3091 },
3052 { 3092 {
3053 "invalid map access into an array with no floor check", 3093 "invalid map access into an array with no floor check",
@@ -3074,6 +3114,7 @@ static struct bpf_test tests[] = {
3074 .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.", 3114 .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
3075 .result_unpriv = REJECT, 3115 .result_unpriv = REJECT,
3076 .result = REJECT, 3116 .result = REJECT,
3117 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3077 }, 3118 },
3078 { 3119 {
3079 "invalid map access into an array with a invalid max check", 3120 "invalid map access into an array with a invalid max check",
@@ -3100,6 +3141,7 @@ static struct bpf_test tests[] = {
3100 .errstr = "invalid access to map value, value_size=48 off=44 size=8", 3141 .errstr = "invalid access to map value, value_size=48 off=44 size=8",
3101 .result_unpriv = REJECT, 3142 .result_unpriv = REJECT,
3102 .result = REJECT, 3143 .result = REJECT,
3144 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3103 }, 3145 },
3104 { 3146 {
3105 "invalid map access into an array with a invalid max check", 3147 "invalid map access into an array with a invalid max check",
@@ -3129,6 +3171,7 @@ static struct bpf_test tests[] = {
3129 .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.", 3171 .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
3130 .result_unpriv = REJECT, 3172 .result_unpriv = REJECT,
3131 .result = REJECT, 3173 .result = REJECT,
3174 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3132 }, 3175 },
3133 { 3176 {
3134 "multiple registers share map_lookup_elem result", 3177 "multiple registers share map_lookup_elem result",
@@ -3252,6 +3295,7 @@ static struct bpf_test tests[] = {
3252 .result = REJECT, 3295 .result = REJECT,
3253 .errstr_unpriv = "R0 pointer arithmetic prohibited", 3296 .errstr_unpriv = "R0 pointer arithmetic prohibited",
3254 .result_unpriv = REJECT, 3297 .result_unpriv = REJECT,
3298 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3255 }, 3299 },
3256 { 3300 {
3257 "constant register |= constant should keep constant type", 3301 "constant register |= constant should keep constant type",
@@ -3418,6 +3462,26 @@ static struct bpf_test tests[] = {
3418 .prog_type = BPF_PROG_TYPE_LWT_XMIT, 3462 .prog_type = BPF_PROG_TYPE_LWT_XMIT,
3419 }, 3463 },
3420 { 3464 {
3465 "overlapping checks for direct packet access",
3466 .insns = {
3467 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3468 offsetof(struct __sk_buff, data)),
3469 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3470 offsetof(struct __sk_buff, data_end)),
3471 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3472 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3473 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
3474 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
3475 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
3476 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
3477 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
3478 BPF_MOV64_IMM(BPF_REG_0, 0),
3479 BPF_EXIT_INSN(),
3480 },
3481 .result = ACCEPT,
3482 .prog_type = BPF_PROG_TYPE_LWT_XMIT,
3483 },
3484 {
3421 "invalid access of tc_classid for LWT_IN", 3485 "invalid access of tc_classid for LWT_IN",
3422 .insns = { 3486 .insns = {
3423 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 3487 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
@@ -3961,7 +4025,208 @@ static struct bpf_test tests[] = {
3961 .result_unpriv = REJECT, 4025 .result_unpriv = REJECT,
3962 }, 4026 },
3963 { 4027 {
3964 "map element value (adjusted) is preserved across register spilling", 4028 "map element value or null is marked on register spilling",
4029 .insns = {
4030 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4031 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4032 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4033 BPF_LD_MAP_FD(BPF_REG_1, 0),
4034 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4035 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4036 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -152),
4037 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
4038 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
4039 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
4040 BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
4041 BPF_EXIT_INSN(),
4042 },
4043 .fixup_map2 = { 3 },
4044 .errstr_unpriv = "R0 leaks addr",
4045 .result = ACCEPT,
4046 .result_unpriv = REJECT,
4047 },
4048 {
4049 "map element value store of cleared call register",
4050 .insns = {
4051 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4052 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4053 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4054 BPF_LD_MAP_FD(BPF_REG_1, 0),
4055 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4056 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
4057 BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
4058 BPF_EXIT_INSN(),
4059 },
4060 .fixup_map2 = { 3 },
4061 .errstr_unpriv = "R1 !read_ok",
4062 .errstr = "R1 !read_ok",
4063 .result = REJECT,
4064 .result_unpriv = REJECT,
4065 },
4066 {
4067 "map element value with unaligned store",
4068 .insns = {
4069 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4070 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4071 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4072 BPF_LD_MAP_FD(BPF_REG_1, 0),
4073 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4074 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 17),
4075 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3),
4076 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
4077 BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 43),
4078 BPF_ST_MEM(BPF_DW, BPF_REG_0, -2, 44),
4079 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
4080 BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 32),
4081 BPF_ST_MEM(BPF_DW, BPF_REG_8, 2, 33),
4082 BPF_ST_MEM(BPF_DW, BPF_REG_8, -2, 34),
4083 BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 5),
4084 BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 22),
4085 BPF_ST_MEM(BPF_DW, BPF_REG_8, 4, 23),
4086 BPF_ST_MEM(BPF_DW, BPF_REG_8, -7, 24),
4087 BPF_MOV64_REG(BPF_REG_7, BPF_REG_8),
4088 BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 3),
4089 BPF_ST_MEM(BPF_DW, BPF_REG_7, 0, 22),
4090 BPF_ST_MEM(BPF_DW, BPF_REG_7, 4, 23),
4091 BPF_ST_MEM(BPF_DW, BPF_REG_7, -4, 24),
4092 BPF_EXIT_INSN(),
4093 },
4094 .fixup_map2 = { 3 },
4095 .errstr_unpriv = "R0 pointer arithmetic prohibited",
4096 .result = ACCEPT,
4097 .result_unpriv = REJECT,
4098 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4099 },
4100 {
4101 "map element value with unaligned load",
4102 .insns = {
4103 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4104 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4105 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4106 BPF_LD_MAP_FD(BPF_REG_1, 0),
4107 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4108 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
4109 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4110 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 9),
4111 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3),
4112 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
4113 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 2),
4114 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
4115 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 0),
4116 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 2),
4117 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 5),
4118 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
4119 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 4),
4120 BPF_EXIT_INSN(),
4121 },
4122 .fixup_map2 = { 3 },
4123 .errstr_unpriv = "R0 pointer arithmetic prohibited",
4124 .result = ACCEPT,
4125 .result_unpriv = REJECT,
4126 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4127 },
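	/*
	 * "illegal alu op" tests 1-5: anything other than constant
	 * ADD/SUB on a map value pointer (AND, a 32-bit ALU op, DIV, an
	 * endian byte swap, or an XADD against the spilled copy)
	 * degrades it to an unknown scalar, so the subsequent
	 * dereference must be rejected with "invalid mem access 'inv'".
	 */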
4128 {
4129 "map element value illegal alu op, 1",
4130 .insns = {
4131 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4132 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4133 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4134 BPF_LD_MAP_FD(BPF_REG_1, 0),
4135 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4136 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
4137 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 8),
4138 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
4139 BPF_EXIT_INSN(),
4140 },
4141 .fixup_map2 = { 3 },
4142 .errstr_unpriv = "R0 pointer arithmetic prohibited",
4143 .errstr = "invalid mem access 'inv'",
4144 .result = REJECT,
4145 .result_unpriv = REJECT,
4146 },
4147 {
4148 "map element value illegal alu op, 2",
4149 .insns = {
4150 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4151 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4152 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4153 BPF_LD_MAP_FD(BPF_REG_1, 0),
4154 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4155 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
4156 BPF_ALU32_IMM(BPF_ADD, BPF_REG_0, 0),
4157 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
4158 BPF_EXIT_INSN(),
4159 },
4160 .fixup_map2 = { 3 },
4161 .errstr_unpriv = "R0 pointer arithmetic prohibited",
4162 .errstr = "invalid mem access 'inv'",
4163 .result = REJECT,
4164 .result_unpriv = REJECT,
4165 },
4166 {
4167 "map element value illegal alu op, 3",
4168 .insns = {
4169 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4170 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4171 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4172 BPF_LD_MAP_FD(BPF_REG_1, 0),
4173 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4174 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
4175 BPF_ALU64_IMM(BPF_DIV, BPF_REG_0, 42),
4176 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
4177 BPF_EXIT_INSN(),
4178 },
4179 .fixup_map2 = { 3 },
4180 .errstr_unpriv = "R0 pointer arithmetic prohibited",
4181 .errstr = "invalid mem access 'inv'",
4182 .result = REJECT,
4183 .result_unpriv = REJECT,
4184 },
4185 {
4186 "map element value illegal alu op, 4",
4187 .insns = {
4188 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4189 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4190 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4191 BPF_LD_MAP_FD(BPF_REG_1, 0),
4192 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4193 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
4194 BPF_ENDIAN(BPF_FROM_BE, BPF_REG_0, 64),
4195 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
4196 BPF_EXIT_INSN(),
4197 },
4198 .fixup_map2 = { 3 },
4199 .errstr_unpriv = "R0 pointer arithmetic prohibited",
4200 .errstr = "invalid mem access 'inv'",
4201 .result = REJECT,
4202 .result_unpriv = REJECT,
4203 },
4204 {
4205 "map element value illegal alu op, 5",
4206 .insns = {
4207 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4208 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4209 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4210 BPF_LD_MAP_FD(BPF_REG_1, 0),
4211 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4212 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
4213 BPF_MOV64_IMM(BPF_REG_3, 4096),
4214 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4215 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4216 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
4217 BPF_STX_XADD(BPF_DW, BPF_REG_2, BPF_REG_3, 0),
4218 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 0),
4219 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
4220 BPF_EXIT_INSN(),
4221 },
4222 .fixup_map2 = { 3 },
4223 .errstr_unpriv = "R0 invalid mem access 'inv'",
4224 .errstr = "R0 invalid mem access 'inv'",
4225 .result = REJECT,
4226 .result_unpriv = REJECT,
4227 },
4228 {
4229 "map element value is preserved across register spilling",
3965 .insns = { 4230 .insns = {
3966 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 4231 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3967 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 4232 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
@@ -3983,6 +4248,7 @@ static struct bpf_test tests[] = {
3983 .errstr_unpriv = "R0 pointer arithmetic prohibited", 4248 .errstr_unpriv = "R0 pointer arithmetic prohibited",
3984 .result = ACCEPT, 4249 .result = ACCEPT,
3985 .result_unpriv = REJECT, 4250 .result_unpriv = REJECT,
4251 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3986 }, 4252 },
3987 { 4253 {
3988 "helper access to variable memory: stack, bitwise AND + JMP, correct bounds", 4254 "helper access to variable memory: stack, bitwise AND + JMP, correct bounds",
@@ -4421,6 +4687,7 @@ static struct bpf_test tests[] = {
4421 .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.", 4687 .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
4422 .result = REJECT, 4688 .result = REJECT,
4423 .result_unpriv = REJECT, 4689 .result_unpriv = REJECT,
4690 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4424 }, 4691 },
4425 { 4692 {
4426 "invalid range check", 4693 "invalid range check",
@@ -4452,6 +4719,7 @@ static struct bpf_test tests[] = {
4452 .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.", 4719 .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
4453 .result = REJECT, 4720 .result = REJECT,
4454 .result_unpriv = REJECT, 4721 .result_unpriv = REJECT,
4722 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4455 } 4723 }
4456}; 4724};
4457 4725
@@ -4530,11 +4798,11 @@ static void do_test_fixup(struct bpf_test *test, struct bpf_insn *prog,
4530static void do_test_single(struct bpf_test *test, bool unpriv, 4798static void do_test_single(struct bpf_test *test, bool unpriv,
4531 int *passes, int *errors) 4799 int *passes, int *errors)
4532{ 4800{
4801 int fd_prog, expected_ret, reject_from_alignment;
4533 struct bpf_insn *prog = test->insns; 4802 struct bpf_insn *prog = test->insns;
4534 int prog_len = probe_filter_length(prog); 4803 int prog_len = probe_filter_length(prog);
4535 int prog_type = test->prog_type; 4804 int prog_type = test->prog_type;
4536 int fd_f1 = -1, fd_f2 = -1, fd_f3 = -1; 4805 int fd_f1 = -1, fd_f2 = -1, fd_f3 = -1;
4537 int fd_prog, expected_ret;
4538 const char *expected_err; 4806 const char *expected_err;
4539 4807
4540 do_test_fixup(test, prog, &fd_f1, &fd_f2, &fd_f3); 4808 do_test_fixup(test, prog, &fd_f1, &fd_f2, &fd_f3);
@@ -4547,8 +4815,19 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
4547 test->result_unpriv : test->result; 4815 test->result_unpriv : test->result;
4548 expected_err = unpriv && test->errstr_unpriv ? 4816 expected_err = unpriv && test->errstr_unpriv ?
4549 test->errstr_unpriv : test->errstr; 4817 test->errstr_unpriv : test->errstr;
4818
4819 reject_from_alignment = fd_prog < 0 &&
4820 (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS) &&
4821 strstr(bpf_vlog, "Unknown alignment.");
4822#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
4823 if (reject_from_alignment) {
4824 printf("FAIL\nFailed due to alignment despite having efficient unaligned access: '%s'!\n",
4825 strerror(errno));
4826 goto fail_log;
4827 }
4828#endif
4550 if (expected_ret == ACCEPT) { 4829 if (expected_ret == ACCEPT) {
4551 if (fd_prog < 0) { 4830 if (fd_prog < 0 && !reject_from_alignment) {
4552 printf("FAIL\nFailed to load prog '%s'!\n", 4831 printf("FAIL\nFailed to load prog '%s'!\n",
4553 strerror(errno)); 4832 strerror(errno));
4554 goto fail_log; 4833 goto fail_log;
@@ -4558,14 +4837,15 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
4558 printf("FAIL\nUnexpected success to load!\n"); 4837 printf("FAIL\nUnexpected success to load!\n");
4559 goto fail_log; 4838 goto fail_log;
4560 } 4839 }
4561 if (!strstr(bpf_vlog, expected_err)) { 4840 if (!strstr(bpf_vlog, expected_err) && !reject_from_alignment) {
4562 printf("FAIL\nUnexpected error message!\n"); 4841 printf("FAIL\nUnexpected error message!\n");
4563 goto fail_log; 4842 goto fail_log;
4564 } 4843 }
4565 } 4844 }
4566 4845
4567 (*passes)++; 4846 (*passes)++;
4568 printf("OK\n"); 4847 printf("OK%s\n", reject_from_alignment ?
4848 " (NOTE: reject due to unknown alignment)" : "");
4569close_fds: 4849close_fds:
4570 close(fd_prog); 4850 close(fd_prog);
4571 close(fd_f1); 4851 close(fd_f1);
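
A note on the mechanism added above: do_test_single() now classifies a load failure as an expected alignment reject when the test opted in via F_NEEDS_EFFICIENT_UNALIGNED_ACCESS and the verifier log contains "Unknown alignment."; on CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS builds the same condition is instead a hard failure, since the verifier should never take the unknown-alignment path there. Below is a minimal standalone restatement of that check; the flag is re-declared locally, and its value here is an assumption matching the selftest header.

#include <stdbool.h>
#include <string.h>

#define F_NEEDS_EFFICIENT_UNALIGNED_ACCESS	(1 << 0)	/* assumed value */

/*
 * True when a BPF_PROG_LOAD failure should be tolerated: the program
 * failed to load, the test declared that it needs efficient unaligned
 * access, and the verifier log blames alignment.
 */
static bool reject_from_alignment(int fd_prog, unsigned int flags,
				  const char *bpf_vlog)
{
	return fd_prog < 0 &&
	       (flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS) &&
	       strstr(bpf_vlog, "Unknown alignment.") != NULL;
}
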
diff --git a/tools/testing/selftests/powerpc/Makefile b/tools/testing/selftests/powerpc/Makefile
index 1c5d0575802e..bf13fc2297aa 100644
--- a/tools/testing/selftests/powerpc/Makefile
+++ b/tools/testing/selftests/powerpc/Makefile
@@ -34,34 +34,34 @@ endif
34all: $(SUB_DIRS) 34all: $(SUB_DIRS)
35 35
36$(SUB_DIRS): 36$(SUB_DIRS):
37 BUILD_TARGET=$$OUTPUT/$@; mkdir -p $$BUILD_TARGET; $(MAKE) OUTPUT=$$BUILD_TARGET -k -C $@ all 37 BUILD_TARGET=$(OUTPUT)/$@; mkdir -p $$BUILD_TARGET; $(MAKE) OUTPUT=$$BUILD_TARGET -k -C $@ all
38 38
39include ../lib.mk 39include ../lib.mk
40 40
41override define RUN_TESTS 41override define RUN_TESTS
42 @for TARGET in $(SUB_DIRS); do \ 42 @for TARGET in $(SUB_DIRS); do \
43 BUILD_TARGET=$$OUTPUT/$$TARGET; \ 43 BUILD_TARGET=$(OUTPUT)/$$TARGET; \
44 $(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET run_tests;\ 44 $(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET run_tests;\
45 done; 45 done;
46endef 46endef
47 47
48override define INSTALL_RULE 48override define INSTALL_RULE
49 @for TARGET in $(SUB_DIRS); do \ 49 @for TARGET in $(SUB_DIRS); do \
50 BUILD_TARGET=$$OUTPUT/$$TARGET; \ 50 BUILD_TARGET=$(OUTPUT)/$$TARGET; \
51 $(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET install;\ 51 $(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET install;\
52 done; 52 done;
53endef 53endef
54 54
55override define EMIT_TESTS 55override define EMIT_TESTS
56 @for TARGET in $(SUB_DIRS); do \ 56 @for TARGET in $(SUB_DIRS); do \
57 BUILD_TARGET=$$OUTPUT/$$TARGET; \ 57 BUILD_TARGET=$(OUTPUT)/$$TARGET; \
58 $(MAKE) OUTPUT=$$BUILD_TARGET -s -C $$TARGET emit_tests;\ 58 $(MAKE) OUTPUT=$$BUILD_TARGET -s -C $$TARGET emit_tests;\
59 done; 59 done;
60endef 60endef
61 61
62clean: 62clean:
63 @for TARGET in $(SUB_DIRS); do \ 63 @for TARGET in $(SUB_DIRS); do \
64 BUILD_TARGET=$$OUTPUT/$$TARGET; \ 64 BUILD_TARGET=$(OUTPUT)/$$TARGET; \
65 $(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET clean; \ 65 $(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET clean; \
66 done; 66 done;
67 rm -f tags 67 rm -f tags
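
For context on the Makefile change: inside a recipe, $$OUTPUT reaches the shell as $OUTPUT, and since OUTPUT is a make variable (set by the included lib.mk) rather than an exported environment variable, the shell saw it empty and BUILD_TARGET collapsed to paths like /<subdir>. $(OUTPUT) is expanded by make itself before the recipe runs, which is what these rules intend, hence every occurrence is switched.
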
diff --git a/virt/kvm/arm/vgic/vgic-init.c b/virt/kvm/arm/vgic/vgic-init.c
index 276139a24e6f..702f8108608d 100644
--- a/virt/kvm/arm/vgic/vgic-init.c
+++ b/virt/kvm/arm/vgic/vgic-init.c
@@ -392,6 +392,25 @@ static irqreturn_t vgic_maintenance_handler(int irq, void *data)
392} 392}
393 393
394/** 394/**
395 * kvm_vgic_init_cpu_hardware - initialize the GIC VE hardware
396 *
397 * For a specific CPU, initialize the GIC VE hardware.
398 */
399void kvm_vgic_init_cpu_hardware(void)
400{
401 BUG_ON(preemptible());
402
403 /*
404 * We want to make sure the list registers start out clear so that we
405 * only have to program the used registers.
406 */
407 if (kvm_vgic_global_state.type == VGIC_V2)
408 vgic_v2_init_lrs();
409 else
410 kvm_call_hyp(__vgic_v3_init_lrs);
411}
412
413/**
395 * kvm_vgic_hyp_init: populates the kvm_vgic_global_state variable 414 * kvm_vgic_hyp_init: populates the kvm_vgic_global_state variable
396 * according to the host GIC model. Accordingly calls either 415 * according to the host GIC model. Accordingly calls either
397 * vgic_v2/v3_probe which registers the KVM_DEVICE that can be 416 * vgic_v2/v3_probe which registers the KVM_DEVICE that can be
diff --git a/virt/kvm/arm/vgic/vgic-mmio-v2.c b/virt/kvm/arm/vgic/vgic-mmio-v2.c
index a3ad7ff95c9b..0a4283ed9aa7 100644
--- a/virt/kvm/arm/vgic/vgic-mmio-v2.c
+++ b/virt/kvm/arm/vgic/vgic-mmio-v2.c
@@ -229,7 +229,15 @@ static unsigned long vgic_mmio_read_vcpuif(struct kvm_vcpu *vcpu,
229 val = vmcr.ctlr; 229 val = vmcr.ctlr;
230 break; 230 break;
231 case GIC_CPU_PRIMASK: 231 case GIC_CPU_PRIMASK:
232 val = vmcr.pmr; 232 /*
233 * Our KVM_DEV_TYPE_ARM_VGIC_V2 device ABI exports the
234 * PMR field as GICH_VMCR.VMPriMask rather than
235 * GICC_PMR.Priority, so we expose the upper five bits of
236 * priority mask to userspace using the lower bits in the
237 * unsigned long.
238 */
239 val = (vmcr.pmr & GICV_PMR_PRIORITY_MASK) >>
240 GICV_PMR_PRIORITY_SHIFT;
233 break; 241 break;
234 case GIC_CPU_BINPOINT: 242 case GIC_CPU_BINPOINT:
235 val = vmcr.bpr; 243 val = vmcr.bpr;
@@ -262,7 +270,15 @@ static void vgic_mmio_write_vcpuif(struct kvm_vcpu *vcpu,
262 vmcr.ctlr = val; 270 vmcr.ctlr = val;
263 break; 271 break;
264 case GIC_CPU_PRIMASK: 272 case GIC_CPU_PRIMASK:
265 vmcr.pmr = val; 273 /*
274 * Our KVM_DEV_TYPE_ARM_VGIC_V2 device ABI exports the
275 * PMR field as GICH_VMCR.VMPriMask rather than
276 * GICC_PMR.Priority, so we expose the upper five bits of
277 * priority mask to userspace using the lower bits in the
278 * unsigned long.
279 */
280 vmcr.pmr = (val << GICV_PMR_PRIORITY_SHIFT) &
281 GICV_PMR_PRIORITY_MASK;
266 break; 282 break;
267 case GIC_CPU_BINPOINT: 283 case GIC_CPU_BINPOINT:
268 vmcr.bpr = val; 284 vmcr.bpr = val;
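
To make the GICH_VMCR.VMPriMask vs. GICC_PMR.Priority remapping above concrete, here is a minimal userspace round-trip of the two conversions. The constants are re-declared locally so the sketch stands alone; their values (shift of 3, mask covering bits [7:3]) are assumed to match the kernel's arm-gic.h definitions.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed to match include/linux/irqchip/arm-gic.h. */
#define GICV_PMR_PRIORITY_SHIFT	3
#define GICV_PMR_PRIORITY_MASK	(0x1f << GICV_PMR_PRIORITY_SHIFT)

/* GICC_PMR.Priority (bits [7:3] significant) -> userspace view. */
static uint32_t pmr_to_uapi(uint32_t pmr)
{
	return (pmr & GICV_PMR_PRIORITY_MASK) >> GICV_PMR_PRIORITY_SHIFT;
}

/* Userspace view (five bits in the low bits) -> GICC_PMR.Priority. */
static uint32_t uapi_to_pmr(uint32_t val)
{
	return (val << GICV_PMR_PRIORITY_SHIFT) & GICV_PMR_PRIORITY_MASK;
}

int main(void)
{
	uint32_t pmr = 0xa0;			/* priority mask 0b10100xxx */

	assert(pmr_to_uapi(pmr) == 0x14);	/* upper five bits: 0b10100 */
	assert(uapi_to_pmr(pmr_to_uapi(pmr)) == pmr);
	printf("GICC_PMR %#x <-> uapi %#x\n", pmr, pmr_to_uapi(pmr));
	return 0;
}

For an implementation with 32 priority levels the low three bits of GICC_PMR are architecturally zero, so the conversion is lossless in both directions.
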
diff --git a/virt/kvm/arm/vgic/vgic-v2.c b/virt/kvm/arm/vgic/vgic-v2.c
index b834ecdf3225..b637d9c7afe3 100644
--- a/virt/kvm/arm/vgic/vgic-v2.c
+++ b/virt/kvm/arm/vgic/vgic-v2.c
@@ -36,6 +36,21 @@ static unsigned long *u64_to_bitmask(u64 *val)
36 return (unsigned long *)val; 36 return (unsigned long *)val;
37} 37}
38 38
39static inline void vgic_v2_write_lr(int lr, u32 val)
40{
41 void __iomem *base = kvm_vgic_global_state.vctrl_base;
42
43 writel_relaxed(val, base + GICH_LR0 + (lr * 4));
44}
45
46void vgic_v2_init_lrs(void)
47{
48 int i;
49
50 for (i = 0; i < kvm_vgic_global_state.nr_lr; i++)
51 vgic_v2_write_lr(i, 0);
52}
53
39void vgic_v2_process_maintenance(struct kvm_vcpu *vcpu) 54void vgic_v2_process_maintenance(struct kvm_vcpu *vcpu)
40{ 55{
41 struct vgic_v2_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v2; 56 struct vgic_v2_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v2;
@@ -191,8 +206,8 @@ void vgic_v2_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
191 GICH_VMCR_ALIAS_BINPOINT_MASK; 206 GICH_VMCR_ALIAS_BINPOINT_MASK;
192 vmcr |= (vmcrp->bpr << GICH_VMCR_BINPOINT_SHIFT) & 207 vmcr |= (vmcrp->bpr << GICH_VMCR_BINPOINT_SHIFT) &
193 GICH_VMCR_BINPOINT_MASK; 208 GICH_VMCR_BINPOINT_MASK;
194 vmcr |= (vmcrp->pmr << GICH_VMCR_PRIMASK_SHIFT) & 209 vmcr |= ((vmcrp->pmr >> GICV_PMR_PRIORITY_SHIFT) <<
195 GICH_VMCR_PRIMASK_MASK; 210 GICH_VMCR_PRIMASK_SHIFT) & GICH_VMCR_PRIMASK_MASK;
196 211
197 vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr = vmcr; 212 vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr = vmcr;
198} 213}
@@ -207,8 +222,8 @@ void vgic_v2_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
207 GICH_VMCR_ALIAS_BINPOINT_SHIFT; 222 GICH_VMCR_ALIAS_BINPOINT_SHIFT;
208 vmcrp->bpr = (vmcr & GICH_VMCR_BINPOINT_MASK) >> 223 vmcrp->bpr = (vmcr & GICH_VMCR_BINPOINT_MASK) >>
209 GICH_VMCR_BINPOINT_SHIFT; 224 GICH_VMCR_BINPOINT_SHIFT;
210 vmcrp->pmr = (vmcr & GICH_VMCR_PRIMASK_MASK) >> 225 vmcrp->pmr = ((vmcr & GICH_VMCR_PRIMASK_MASK) >>
211 GICH_VMCR_PRIMASK_SHIFT; 226 GICH_VMCR_PRIMASK_SHIFT) << GICV_PMR_PRIORITY_SHIFT;
212} 227}
213 228
214void vgic_v2_enable(struct kvm_vcpu *vcpu) 229void vgic_v2_enable(struct kvm_vcpu *vcpu)
diff --git a/virt/kvm/arm/vgic/vgic.h b/virt/kvm/arm/vgic/vgic.h
index db28f7cadab2..6cf557e9f718 100644
--- a/virt/kvm/arm/vgic/vgic.h
+++ b/virt/kvm/arm/vgic/vgic.h
@@ -81,11 +81,18 @@ static inline bool irq_is_pending(struct vgic_irq *irq)
81 return irq->pending_latch || irq->line_level; 81 return irq->pending_latch || irq->line_level;
82} 82}
83 83
84/*
85 * This struct provides an intermediate representation of the fields contained
86 * in the GICH_VMCR and ICH_VMCR registers, such that code exporting the GIC
87 * state to userspace can generate either GICv2 or GICv3 CPU interface
88 * registers regardless of the hardware-backed GIC in use.
89 */
84struct vgic_vmcr { 90struct vgic_vmcr {
85 u32 ctlr; 91 u32 ctlr;
86 u32 abpr; 92 u32 abpr;
87 u32 bpr; 93 u32 bpr;
88 u32 pmr; 94 u32 pmr; /* Priority mask field in the GICC_PMR and
95 * ICC_PMR_EL1 priority field format */
89 /* The member variables below are valid only for GICv3 */ 96 /* The member variables below are valid only for GICv3 */
90 u32 grpen0; 97 u32 grpen0;
91 u32 grpen1; 98 u32 grpen1;
@@ -130,6 +137,8 @@ int vgic_v2_map_resources(struct kvm *kvm);
130int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address, 137int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address,
131 enum vgic_type); 138 enum vgic_type);
132 139
140void vgic_v2_init_lrs(void);
141
133static inline void vgic_get_irq_kref(struct vgic_irq *irq) 142static inline void vgic_get_irq_kref(struct vgic_irq *irq)
134{ 143{
135 if (irq->intid < VGIC_MIN_LPI) 144 if (irq->intid < VGIC_MIN_LPI)
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
index a29786dd9522..4d28a9ddbee0 100644
--- a/virt/kvm/eventfd.c
+++ b/virt/kvm/eventfd.c
@@ -870,7 +870,8 @@ kvm_deassign_ioeventfd_idx(struct kvm *kvm, enum kvm_bus bus_idx,
870 continue; 870 continue;
871 871
872 kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev); 872 kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev);
873 kvm->buses[bus_idx]->ioeventfd_count--; 873 if (kvm->buses[bus_idx])
874 kvm->buses[bus_idx]->ioeventfd_count--;
874 ioeventfd_release(p); 875 ioeventfd_release(p);
875 ret = 0; 876 ret = 0;
876 break; 877 break;
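
The eventfd guard above and the kvm_main.c hunks below enforce one shared invariant: once kvm_io_bus_unregister_dev() fails to allocate a shrunk bus copy, a NULL bus pointer is published, so every reader must check for it before dispatching. A schematic reader follows; srcu_dereference() is stubbed with a plain read so the sketch is self-contained, and the helper name is illustrative.

#include <errno.h>

struct kvm_io_bus;			/* opaque for the sketch */

/* Stand-in for srcu_dereference(); the real macro additionally
 * requires the SRCU read lock to be held. */
#define srcu_deref_stub(p)	(p)

static int bus_op(struct kvm_io_bus *buses[], int bus_idx)
{
	struct kvm_io_bus *bus = srcu_deref_stub(buses[bus_idx]);

	/* A failed shrink in kvm_io_bus_unregister_dev() may have
	 * published a NULL bus; treat it like an allocation failure,
	 * as the hunks below do. */
	if (!bus)
		return -ENOMEM;
	/* ... dispatch to the matching device range here ... */
	return 0;
}
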
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index a17d78759727..88257b311cb5 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -727,8 +727,11 @@ static void kvm_destroy_vm(struct kvm *kvm)
727 list_del(&kvm->vm_list); 727 list_del(&kvm->vm_list);
728 spin_unlock(&kvm_lock); 728 spin_unlock(&kvm_lock);
729 kvm_free_irq_routing(kvm); 729 kvm_free_irq_routing(kvm);
730 for (i = 0; i < KVM_NR_BUSES; i++) 730 for (i = 0; i < KVM_NR_BUSES; i++) {
731 kvm_io_bus_destroy(kvm->buses[i]); 731 if (kvm->buses[i])
732 kvm_io_bus_destroy(kvm->buses[i]);
733 kvm->buses[i] = NULL;
734 }
732 kvm_coalesced_mmio_free(kvm); 735 kvm_coalesced_mmio_free(kvm);
733#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER) 736#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
734 mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm); 737 mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
@@ -1062,7 +1065,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
1062 * changes) is disallowed above, so any other attribute changes getting 1065 * changes) is disallowed above, so any other attribute changes getting
1063 * here can be skipped. 1066 * here can be skipped.
1064 */ 1067 */
1065 if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) { 1068 if (as_id == 0 && (change == KVM_MR_CREATE || change == KVM_MR_MOVE)) {
1066 r = kvm_iommu_map_pages(kvm, &new); 1069 r = kvm_iommu_map_pages(kvm, &new);
1067 return r; 1070 return r;
1068 } 1071 }
@@ -3474,6 +3477,8 @@ int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
3474 }; 3477 };
3475 3478
3476 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); 3479 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
3480 if (!bus)
3481 return -ENOMEM;
3477 r = __kvm_io_bus_write(vcpu, bus, &range, val); 3482 r = __kvm_io_bus_write(vcpu, bus, &range, val);
3478 return r < 0 ? r : 0; 3483 return r < 0 ? r : 0;
3479} 3484}
@@ -3491,6 +3496,8 @@ int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx,
3491 }; 3496 };
3492 3497
3493 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); 3498 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
3499 if (!bus)
3500 return -ENOMEM;
3494 3501
3495 /* First try the device referenced by cookie. */ 3502 /* First try the device referenced by cookie. */
3496 if ((cookie >= 0) && (cookie < bus->dev_count) && 3503 if ((cookie >= 0) && (cookie < bus->dev_count) &&
@@ -3541,6 +3548,8 @@ int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
3541 }; 3548 };
3542 3549
3543 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); 3550 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
3551 if (!bus)
3552 return -ENOMEM;
3544 r = __kvm_io_bus_read(vcpu, bus, &range, val); 3553 r = __kvm_io_bus_read(vcpu, bus, &range, val);
3545 return r < 0 ? r : 0; 3554 return r < 0 ? r : 0;
3546} 3555}
@@ -3553,6 +3562,9 @@ int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
3553 struct kvm_io_bus *new_bus, *bus; 3562 struct kvm_io_bus *new_bus, *bus;
3554 3563
3555 bus = kvm->buses[bus_idx]; 3564 bus = kvm->buses[bus_idx];
3565 if (!bus)
3566 return -ENOMEM;
3567
3556 /* exclude ioeventfd which is limited by maximum fd */ 3568 /* exclude ioeventfd which is limited by maximum fd */
3557 if (bus->dev_count - bus->ioeventfd_count > NR_IOBUS_DEVS - 1) 3569 if (bus->dev_count - bus->ioeventfd_count > NR_IOBUS_DEVS - 1)
3558 return -ENOSPC; 3570 return -ENOSPC;
@@ -3572,37 +3584,41 @@ int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
3572} 3584}
3573 3585
3574/* Caller must hold slots_lock. */ 3586/* Caller must hold slots_lock. */
3575int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx, 3587void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
3576 struct kvm_io_device *dev) 3588 struct kvm_io_device *dev)
3577{ 3589{
3578 int i, r; 3590 int i;
3579 struct kvm_io_bus *new_bus, *bus; 3591 struct kvm_io_bus *new_bus, *bus;
3580 3592
3581 bus = kvm->buses[bus_idx]; 3593 bus = kvm->buses[bus_idx];
3582 r = -ENOENT; 3594 if (!bus)
3595 return;
3596
3583 for (i = 0; i < bus->dev_count; i++) 3597 for (i = 0; i < bus->dev_count; i++)
3584 if (bus->range[i].dev == dev) { 3598 if (bus->range[i].dev == dev) {
3585 r = 0;
3586 break; 3599 break;
3587 } 3600 }
3588 3601
3589 if (r) 3602 if (i == bus->dev_count)
3590 return r; 3603 return;
3591 3604
3592 new_bus = kmalloc(sizeof(*bus) + ((bus->dev_count - 1) * 3605 new_bus = kmalloc(sizeof(*bus) + ((bus->dev_count - 1) *
3593 sizeof(struct kvm_io_range)), GFP_KERNEL); 3606 sizeof(struct kvm_io_range)), GFP_KERNEL);
3594 if (!new_bus) 3607 if (!new_bus) {
3595 return -ENOMEM; 3608 pr_err("kvm: failed to shrink bus, removing it completely\n");
3609 goto broken;
3610 }
3596 3611
3597 memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range)); 3612 memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range));
3598 new_bus->dev_count--; 3613 new_bus->dev_count--;
3599 memcpy(new_bus->range + i, bus->range + i + 1, 3614 memcpy(new_bus->range + i, bus->range + i + 1,
3600 (new_bus->dev_count - i) * sizeof(struct kvm_io_range)); 3615 (new_bus->dev_count - i) * sizeof(struct kvm_io_range));
3601 3616
3617broken:
3602 rcu_assign_pointer(kvm->buses[bus_idx], new_bus); 3618 rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
3603 synchronize_srcu_expedited(&kvm->srcu); 3619 synchronize_srcu_expedited(&kvm->srcu);
3604 kfree(bus); 3620 kfree(bus);
3605 return r; 3621 return;
3606} 3622}
3607 3623
3608struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx, 3624struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
@@ -3615,6 +3631,8 @@ struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
3615 srcu_idx = srcu_read_lock(&kvm->srcu); 3631 srcu_idx = srcu_read_lock(&kvm->srcu);
3616 3632
3617 bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu); 3633 bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu);
3634 if (!bus)
3635 goto out_unlock;
3618 3636
3619 dev_idx = kvm_io_bus_get_first_dev(bus, addr, 1); 3637 dev_idx = kvm_io_bus_get_first_dev(bus, addr, 1);
3620 if (dev_idx < 0) 3638 if (dev_idx < 0)